Initial commit
This commit is contained in:
55
node_modules/mongodb/lib/core/auth/auth_provider.js
generated
vendored
Normal file
55
node_modules/mongodb/lib/core/auth/auth_provider.js
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
'use strict';
|
||||
|
||||
/**
 * Context used during authentication
 *
 * @property {Connection} connection The connection to authenticate
 * @property {MongoCredentials} credentials The credentials to use for authentication
 * @property {object} options The options passed to the `connect` method
 * @property {object?} response The response of the initial handshake
 * @property {Buffer?} nonce A random nonce generated for use in an authentication conversation
 */
class AuthContext {
  /**
   * @param {Connection} connection the connection on which to authenticate
   * @param {MongoCredentials} credentials the credentials to authenticate with
   * @param {object} options the options originally passed to `connect`
   */
  constructor(connection, credentials, options) {
    // Carry the collaborators on the context so auth providers can share state.
    Object.assign(this, { connection, credentials, options });
  }
}
|
||||
|
||||
/**
 * Base class for authentication providers.
 */
class AuthProvider {
  /**
   * @param {BSON} bson the BSON library the provider should use for (de)serialization
   */
  constructor(bson) {
    this.bson = bson;
  }

  /**
   * Prepare the handshake document before the initial handshake.
   *
   * The base implementation is a pass-through: the document is handed back
   * unchanged. Subclasses may augment it (e.g. speculative authentication).
   *
   * @param {object} handshakeDoc The document used for the initial handshake on a connection
   * @param {AuthContext} context Context for authentication flow
   * @param {function} callback invoked with (error, handshakeDoc)
   */
  prepare(handshakeDoc, context, callback) {
    callback(undefined, handshakeDoc);
  }

  /**
   * Authenticate — must be overridden by a concrete provider.
   *
   * @param {AuthContext} context A shared context for authentication flow
   * @param {authResultCallback} callback The callback to return the result from the authentication
   */
  auth(context, callback) {
    callback(new TypeError('`auth` method must be overridden by subclass'));
  }
}
|
||||
|
||||
/**
 * This is a result from an authentication provider
 *
 * @callback authResultCallback
 * @param {error} error An error object; null/undefined when authentication succeeded
 * @param {boolean} result The result of the authentication process
 */

module.exports = { AuthContext, AuthProvider };
|
||||
29
node_modules/mongodb/lib/core/auth/defaultAuthProviders.js
generated
vendored
Normal file
29
node_modules/mongodb/lib/core/auth/defaultAuthProviders.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
'use strict';
|
||||
|
||||
const MongoCR = require('./mongocr');
|
||||
const X509 = require('./x509');
|
||||
const Plain = require('./plain');
|
||||
const GSSAPI = require('./gssapi');
|
||||
const ScramSHA1 = require('./scram').ScramSHA1;
|
||||
const ScramSHA256 = require('./scram').ScramSHA256;
|
||||
const MongoDBAWS = require('./mongodb_aws');
|
||||
|
||||
/**
 * Returns the default authentication providers.
 *
 * @param {BSON} bson Bson definition
 * @returns {Object} a mapping of auth names to auth types
 */
function defaultAuthProviders(bson) {
  // Keys are the lower-cased mechanism names used elsewhere in the driver.
  const providers = {};
  providers['mongodb-aws'] = new MongoDBAWS(bson);
  providers.mongocr = new MongoCR(bson);
  providers.x509 = new X509(bson);
  providers.plain = new Plain(bson);
  providers.gssapi = new GSSAPI(bson);
  providers['scram-sha-1'] = new ScramSHA1(bson);
  providers['scram-sha-256'] = new ScramSHA256(bson);
  return providers;
}

module.exports = { defaultAuthProviders };
|
||||
151
node_modules/mongodb/lib/core/auth/gssapi.js
generated
vendored
Normal file
151
node_modules/mongodb/lib/core/auth/gssapi.js
generated
vendored
Normal file
@@ -0,0 +1,151 @@
|
||||
'use strict';
|
||||
const dns = require('dns');
|
||||
|
||||
const AuthProvider = require('./auth_provider').AuthProvider;
|
||||
const retrieveKerberos = require('../utils').retrieveKerberos;
|
||||
const MongoError = require('../error').MongoError;
|
||||
|
||||
const kGssapiClient = Symbol('GSSAPI_CLIENT');

// Lazily loaded optional `kerberos` dependency (resolved on first `prepare`).
let kerberos;

/**
 * GSSAPI (Kerberos) authentication provider.
 */
class GSSAPI extends AuthProvider {
  /**
   * Initialize a kerberos client for the upcoming SASL conversation and stash
   * it on the provider under a symbol key; the handshake document itself is
   * passed through unchanged.
   *
   * @param {object} handshakeDoc The document used for the initial handshake on a connection
   * @param {AuthContext} authContext Context for authentication flow
   * @param {function} callback invoked with (error, handshakeDoc)
   */
  prepare(handshakeDoc, authContext, callback) {
    const host = authContext.options.host;
    const port = authContext.options.port;
    const credentials = authContext.credentials;
    if (!host || !port || !credentials) {
      // BUGFIX: the previous message interpolated the pieces that were
      // PRESENT (`host ? 'host' : ''`, `credentials ? 'host' : ...`) instead
      // of the ones that were missing; report the missing ones.
      const missing = [];
      if (!host) missing.push('host');
      if (!port) missing.push('port');
      if (!credentials) missing.push('credentials');
      return callback(new MongoError(`Connection must specify: ${missing.join(', ')}.`));
    }

    if (kerberos == null) {
      try {
        kerberos = retrieveKerberos();
      } catch (e) {
        return callback(e);
      }
    }

    const username = credentials.username;
    const password = credentials.password;
    const mechanismProperties = credentials.mechanismProperties;
    // Accept both historical spellings of the service-name property.
    const serviceName =
      mechanismProperties['gssapiservicename'] ||
      mechanismProperties['gssapiServiceName'] ||
      'mongodb';

    performGssapiCanonicalizeHostName(host, mechanismProperties, (err, host) => {
      if (err) return callback(err);

      const initOptions = {};
      if (password != null) {
        Object.assign(initOptions, { user: username, password: password });
      }

      // SPN separator differs by platform: "service/host" on Windows,
      // "service@host" elsewhere.
      kerberos.initializeClient(
        `${serviceName}${process.platform === 'win32' ? '/' : '@'}${host}`,
        initOptions,
        (err, client) => {
          if (err) return callback(new MongoError(err));
          if (client == null) return callback();

          this[kGssapiClient] = client;
          callback(undefined, handshakeDoc);
        }
      );
    });
  }

  /**
   * Run the GSSAPI SASL conversation against the `$external` database.
   *
   * @param {AuthContext} authContext A shared context for authentication flow
   * @param {authResultCallback} callback
   */
  auth(authContext, callback) {
    const connection = authContext.connection;
    const credentials = authContext.credentials;
    if (credentials == null) return callback(new MongoError('credentials required'));

    const username = credentials.username;
    const client = this[kGssapiClient];
    if (client == null) return callback(new MongoError('gssapi client missing'));

    function externalCommand(command, cb) {
      return connection.command('$external.$cmd', command, cb);
    }

    client.step('', (err, payload) => {
      if (err) return callback(err);

      externalCommand(saslStart(payload), (err, response) => {
        // BUGFIX: check the error before dereferencing the response — on a
        // failed command `response` may be undefined.
        if (err) return callback(err);
        const result = response.result;

        negotiate(client, 10, result.payload, (err, payload) => {
          if (err) return callback(err);

          externalCommand(saslContinue(payload, result.conversationId), (err, response) => {
            // BUGFIX: same ordering fix as above.
            if (err) return callback(err);
            const result = response.result;

            finalize(client, username, result.payload, (err, payload) => {
              if (err) return callback(err);

              externalCommand(
                {
                  saslContinue: 1,
                  conversationId: result.conversationId,
                  payload
                },
                (err, result) => {
                  if (err) return callback(err);
                  callback(undefined, result);
                }
              );
            });
          });
        });
      });
    });
  }
}
module.exports = GSSAPI;
|
||||
|
||||
/**
 * Build the `saslStart` command that opens a GSSAPI conversation.
 *
 * @param {string} payload client token produced by the kerberos client
 * @returns {object} the command document
 */
function saslStart(payload) {
  const command = { saslStart: 1, mechanism: 'GSSAPI' };
  command.payload = payload;
  command.autoAuthorize = 1;
  return command;
}
|
||||
/**
 * Build a `saslContinue` command carrying the next conversation payload.
 *
 * @param {string} payload next client token
 * @param {number} conversationId id assigned by the server in saslStart
 * @returns {object} the command document
 */
function saslContinue(payload, conversationId) {
  const command = { saslContinue: 1 };
  command.conversationId = conversationId;
  command.payload = payload;
  return command;
}
|
||||
/**
 * Advance the GSSAPI conversation by one client step, retrying transient
 * failures up to `retries` additional times.
 *
 * @param {object} client the kerberos client
 * @param {number} retries number of retry attempts remaining
 * @param {string} payload server payload fed into the client step
 * @param {function} callback invoked with (error, responsePayload)
 */
function negotiate(client, retries, payload, callback) {
  client.step(payload, (err, response) => {
    if (err) {
      // No retries left: surface the failure; otherwise try the step again.
      if (retries === 0) return callback(err);
      return negotiate(client, retries - 1, payload, callback);
    }

    // Normalize a falsy response to the empty string.
    callback(undefined, response || '');
  });
}
|
||||
/**
 * Final GSSAPI step: unwrap the server challenge, then wrap the result back
 * up with the authenticating user attached.
 *
 * @param {object} client the kerberos client
 * @param {string} user the username being authenticated
 * @param {string} payload server challenge payload
 * @param {function} callback invoked with (error, wrappedPayload)
 */
function finalize(client, user, payload, callback) {
  client.unwrap(payload, (unwrapErr, unwrapped) => {
    if (unwrapErr) return callback(unwrapErr);

    client.wrap(unwrapped || '', { user }, (wrapErr, wrapped) => {
      if (wrapErr) return callback(wrapErr);
      callback(undefined, wrapped);
    });
  });
}
|
||||
/**
 * Optionally resolve `host` to its canonical name via a DNS CNAME lookup.
 * Canonicalization only happens when `gssapiCanonicalizeHostName` is the
 * boolean `true`; otherwise the host is passed through untouched.
 *
 * @param {string} host host from the connection options
 * @param {object} mechanismProperties credential mechanism properties
 * @param {function} callback invoked with (error, host)
 */
function performGssapiCanonicalizeHostName(host, mechanismProperties, callback) {
  // `typeof x === 'boolean' ? x : false` in the original — i.e. strictly true.
  const shouldCanonicalize = mechanismProperties.gssapiCanonicalizeHostName === true;
  if (!shouldCanonicalize) return callback(undefined, host);

  dns.resolveCname(host, (err, addresses) => {
    if (err) return callback(err);

    // Use the first resolved name when the lookup returned any.
    const canonical =
      Array.isArray(addresses) && addresses.length > 0 ? addresses[0] : host;
    callback(undefined, canonical);
  });
}
|
||||
107
node_modules/mongodb/lib/core/auth/mongo_credentials.js
generated
vendored
Normal file
107
node_modules/mongodb/lib/core/auth/mongo_credentials.js
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
'use strict';
|
||||
|
||||
// Resolves the default auth mechanism according to
// https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst
function getDefaultAuthMechanism(ismaster) {
  if (!ismaster) {
    // No handshake information at all — default for wire protocol < 3.
    return 'mongocr';
  }

  // Prefer the server-advertised SASL mechanism list when present:
  // scram-sha-256 if supported, else scram-sha-1.
  if (Array.isArray(ismaster.saslSupportedMechs)) {
    return ismaster.saslSupportedMechs.includes('SCRAM-SHA-256')
      ? 'scram-sha-256'
      : 'scram-sha-1';
  }

  // Legacy selection: wire version 3+ implies SCRAM-SHA-1, older means MONGODB-CR.
  return ismaster.maxWireVersion >= 3 ? 'scram-sha-1' : 'mongocr';
}
|
||||
|
||||
/**
 * A representation of the credentials used by MongoDB
 * @class
 * @property {string} mechanism The method used to authenticate
 * @property {string} [username] The username used for authentication
 * @property {string} [password] The password used for authentication
 * @property {string} [source] The database that the user should authenticate against
 * @property {object} [mechanismProperties] Special properties used by some types of auth mechanisms
 */
class MongoCredentials {
  /**
   * Creates a new MongoCredentials object
   * @param {object} [options]
   * @param {string} [options.username] The username used for authentication
   * @param {string} [options.password] The password used for authentication
   * @param {string} [options.source] The database that the user should authenticate against
   * @param {string} [options.mechanism] The method used to authenticate
   * @param {object} [options.mechanismProperties] Special properties used by some types of auth mechanisms
   */
  constructor(options) {
    const opts = options || {};
    this.username = opts.username;
    this.password = opts.password;
    this.source = opts.source || opts.db;
    this.mechanism = opts.mechanism || 'default';
    this.mechanismProperties = opts.mechanismProperties || {};

    if (/MONGODB-AWS/i.test(this.mechanism)) {
      // MONGODB-AWS credentials may be sourced from the standard AWS
      // environment variables when not supplied explicitly.
      if (this.username == null && process.env.AWS_ACCESS_KEY_ID) {
        this.username = process.env.AWS_ACCESS_KEY_ID;
      }
      if (this.password == null && process.env.AWS_SECRET_ACCESS_KEY) {
        this.password = process.env.AWS_SECRET_ACCESS_KEY;
      }
      if (this.mechanismProperties.AWS_SESSION_TOKEN == null && process.env.AWS_SESSION_TOKEN) {
        this.mechanismProperties.AWS_SESSION_TOKEN = process.env.AWS_SESSION_TOKEN;
      }
    }

    // Credentials are immutable once constructed.
    Object.freeze(this.mechanismProperties);
    Object.freeze(this);
  }

  /**
   * Determines if two MongoCredentials objects are equivalent
   * @param {MongoCredentials} other another MongoCredentials object
   * @returns {boolean} true if the two objects are equal.
   */
  equals(other) {
    if (this.mechanism !== other.mechanism) return false;
    if (this.username !== other.username) return false;
    if (this.password !== other.password) return false;
    return this.source === other.source;
  }

  /**
   * If the authentication mechanism is set to "default", resolves the authMechanism
   * based on the server version and server supported sasl mechanisms.
   *
   * @param {Object} [ismaster] An ismaster response from the server
   * @returns {MongoCredentials}
   */
  resolveAuthMechanism(ismaster) {
    // Anything other than "default" is already resolved.
    if (!/DEFAULT/i.test(this.mechanism)) {
      return this;
    }

    return new MongoCredentials({
      username: this.username,
      password: this.password,
      source: this.source,
      mechanism: getDefaultAuthMechanism(ismaster),
      mechanismProperties: this.mechanismProperties
    });
  }
}
|
||||
|
||||
module.exports = { MongoCredentials };
|
||||
45
node_modules/mongodb/lib/core/auth/mongocr.js
generated
vendored
Normal file
45
node_modules/mongodb/lib/core/auth/mongocr.js
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
'use strict';
|
||||
|
||||
const crypto = require('crypto');
|
||||
const AuthProvider = require('./auth_provider').AuthProvider;
|
||||
|
||||
/**
 * Legacy MONGODB-CR authentication provider: getnonce followed by an
 * `authenticate` command carrying an md5-derived key.
 */
class MongoCR extends AuthProvider {
  /**
   * @param {AuthContext} authContext context for the authentication flow
   * @param {authResultCallback} callback
   */
  auth(authContext, callback) {
    const connection = authContext.connection;
    const credentials = authContext.credentials;
    const username = credentials.username;
    const password = credentials.password;
    const source = credentials.source;

    connection.command(`${source}.$cmd`, { getnonce: 1 }, (err, result) => {
      let nonce = null;
      let key = null;

      if (err == null) {
        // key = md5(nonce + username + md5(username + ':mongo:' + password))
        nonce = result.result.nonce;
        const hashedPassword = crypto
          .createHash('md5')
          .update(`${username}:mongo:${password}`, 'utf8')
          .digest('hex');
        key = crypto
          .createHash('md5')
          .update(`${nonce}${username}${hashedPassword}`, 'utf8')
          .digest('hex');
      }

      // NOTE: if getnonce failed, authenticate is still sent (with null
      // nonce/key) so the server's rejection reaches `callback`.
      const authenticateCommand = {
        authenticate: 1,
        user: username,
        nonce,
        key
      };

      connection.command(`${source}.$cmd`, authenticateCommand, callback);
    });
  }
}

module.exports = MongoCR;
|
||||
256
node_modules/mongodb/lib/core/auth/mongodb_aws.js
generated
vendored
Normal file
256
node_modules/mongodb/lib/core/auth/mongodb_aws.js
generated
vendored
Normal file
@@ -0,0 +1,256 @@
|
||||
'use strict';
|
||||
const AuthProvider = require('./auth_provider').AuthProvider;
|
||||
const MongoCredentials = require('./mongo_credentials').MongoCredentials;
|
||||
const MongoError = require('../error').MongoError;
|
||||
const crypto = require('crypto');
|
||||
const http = require('http');
|
||||
const maxWireVersion = require('../utils').maxWireVersion;
|
||||
const url = require('url');
|
||||
|
||||
let aws4;
|
||||
try {
|
||||
aws4 = require('aws4');
|
||||
} catch (e) {
|
||||
// don't do anything;
|
||||
}
|
||||
|
||||
// ASCII code for 'n' — sent as `p` (the gs2-cb-flag, "no channel binding")
// in the client-first message.
const ASCII_N = 110;
// ECS task-metadata endpoint (combined with AWS_CONTAINER_CREDENTIALS_RELATIVE_URI).
const AWS_RELATIVE_URI = 'http://169.254.170.2';
// EC2 instance-metadata service, and the IAM role credentials path under it.
const AWS_EC2_URI = 'http://169.254.169.254';
const AWS_EC2_PATH = '/latest/meta-data/iam/security-credentials';

/**
 * MONGODB-AWS authentication provider (requires wire version 9+, i.e. 4.4+).
 *
 * Flow: send a saslStart carrying a 32-byte client nonce; the server replies
 * with a combined nonce and an STS host; sign an STS GetCallerIdentity
 * request with `aws4` and hand the signature back via saslContinue.
 */
class MongoDBAWS extends AuthProvider {
  /**
   * @param {AuthContext} authContext context for the authentication flow
   * @param {authResultCallback} callback
   */
  auth(authContext, callback) {
    const connection = authContext.connection;
    const credentials = authContext.credentials;

    if (maxWireVersion(connection) < 9) {
      callback(new MongoError('MONGODB-AWS authentication requires MongoDB version 4.4 or later'));
      return;
    }

    // `aws4` is an optional dependency (loaded in a guarded require above).
    if (aws4 == null) {
      callback(
        new MongoError(
          'MONGODB-AWS authentication requires the `aws4` module, please install it as a dependency of your project'
        )
      );

      return;
    }

    // No explicit credentials: obtain temporary ones from the ECS/EC2
    // metadata services, then re-enter `auth` with those.
    if (credentials.username == null) {
      makeTempCredentials(credentials, (err, tempCredentials) => {
        if (err) return callback(err);

        authContext.credentials = tempCredentials;
        this.auth(authContext, callback);
      });

      return;
    }

    const username = credentials.username;
    const password = credentials.password;
    const db = credentials.source;
    const token = credentials.mechanismProperties.AWS_SESSION_TOKEN;
    const bson = this.bson;

    crypto.randomBytes(32, (err, nonce) => {
      if (err) {
        callback(err);
        return;
      }

      const saslStart = {
        saslStart: 1,
        mechanism: 'MONGODB-AWS',
        payload: bson.serialize({ r: nonce, p: ASCII_N })
      };

      connection.command(`${db}.$cmd`, saslStart, (err, result) => {
        if (err) return callback(err);

        const res = result.result;
        const serverResponse = bson.deserialize(res.payload.buffer);
        const host = serverResponse.h;
        const serverNonce = serverResponse.s.buffer;
        // Server nonce must be 64 bytes and must start with our 32-byte nonce.
        if (serverNonce.length !== 64) {
          callback(
            new MongoError(`Invalid server nonce length ${serverNonce.length}, expected 64`)
          );
          return;
        }

        if (serverNonce.compare(nonce, 0, nonce.length, 0, nonce.length) !== 0) {
          callback(new MongoError('Server nonce does not begin with client nonce'));
          return;
        }

        // Basic sanity check on the STS host the server asks us to sign for.
        if (host.length < 1 || host.length > 255 || host.indexOf('..') !== -1) {
          callback(new MongoError(`Server returned an invalid host: "${host}"`));
          return;
        }

        const body = 'Action=GetCallerIdentity&Version=2011-06-15';
        const options = aws4.sign(
          {
            method: 'POST',
            host,
            region: deriveRegion(serverResponse.h),
            service: 'sts',
            headers: {
              'Content-Type': 'application/x-www-form-urlencoded',
              'Content-Length': body.length,
              'X-MongoDB-Server-Nonce': serverNonce.toString('base64'),
              'X-MongoDB-GS2-CB-Flag': 'n'
            },
            path: '/',
            body
          },
          {
            accessKeyId: username,
            secretAccessKey: password,
            token
          }
        );

        // Only the signature pieces travel back to the server.
        const authorization = options.headers.Authorization;
        const date = options.headers['X-Amz-Date'];
        const payload = { a: authorization, d: date };
        if (token) {
          payload.t = token;
        }

        const saslContinue = {
          saslContinue: 1,
          conversationId: 1,
          payload: bson.serialize(payload)
        };

        connection.command(`${db}.$cmd`, saslContinue, err => {
          if (err) return callback(err);
          callback();
        });
      });
    });
  }
}
|
||||
|
||||
/**
 * Obtain temporary AWS credentials, either from the ECS task-metadata
 * endpoint (when AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is set) or from the
 * EC2 instance-metadata service (token -> role name -> credentials).
 *
 * @param {MongoCredentials} credentials the original credentials (supplies `source`)
 * @param {function} callback invoked with (error, MongoCredentials)
 */
function makeTempCredentials(credentials, callback) {
  // Convert a raw metadata-service response into MongoCredentials.
  function done(creds) {
    if (creds.AccessKeyId == null || creds.SecretAccessKey == null || creds.Token == null) {
      return callback(new MongoError('Could not obtain temporary MONGODB-AWS credentials'));
    }

    callback(
      undefined,
      new MongoCredentials({
        username: creds.AccessKeyId,
        password: creds.SecretAccessKey,
        source: credentials.source,
        mechanism: 'MONGODB-AWS',
        mechanismProperties: {
          AWS_SESSION_TOKEN: creds.Token
        }
      })
    );
  }

  // If the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI
  // is set then drivers MUST assume that it was set by an AWS ECS agent.
  const relativeUri = process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI;
  if (relativeUri) {
    request(`${AWS_RELATIVE_URI}${relativeUri}`, (err, res) => {
      if (err) return callback(err);
      done(res);
    });
    return;
  }

  // Otherwise assume we are on an EC2 instance: first get a metadata token...
  request(
    `${AWS_EC2_URI}/latest/api/token`,
    { method: 'PUT', json: false, headers: { 'X-aws-ec2-metadata-token-ttl-seconds': 30 } },
    (err, token) => {
      if (err) return callback(err);

      // ...then the IAM role name...
      request(
        `${AWS_EC2_URI}/${AWS_EC2_PATH}`,
        { json: false, headers: { 'X-aws-ec2-metadata-token': token } },
        (err, roleName) => {
          if (err) return callback(err);

          // ...and finally the temporary credentials for that role.
          request(
            `${AWS_EC2_URI}/${AWS_EC2_PATH}/${roleName}`,
            { headers: { 'X-aws-ec2-metadata-token': token } },
            (err, creds) => {
              if (err) return callback(err);
              done(creds);
            }
          );
        }
      );
    }
  );
}
|
||||
|
||||
/**
 * Derive the AWS region from an STS hostname: the default endpoint
 * (`sts.amazonaws.com`) and single-label hosts map to `us-east-1`;
 * regional hosts like `sts.us-west-2.amazonaws.com` yield the second label.
 *
 * @param {string} host STS hostname returned by the server
 * @returns {string} the region to sign for
 */
function deriveRegion(host) {
  const labels = host.split('.');
  const isDefaultEndpoint = labels.length === 1 || labels[1] === 'amazonaws';
  return isDefaultEndpoint ? 'us-east-1' : labels[1];
}
|
||||
|
||||
/**
 * Minimal HTTP helper used for the AWS metadata endpoints.
 *
 * @param {string} uri full URI to request
 * @param {object|function} [options] { method, timeout, json, headers } — or the callback
 * @param {function} callback invoked with (error, body); body is parsed JSON
 *     unless `json: false` was passed, in which case the raw text is returned
 */
function request(uri, options, callback) {
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }

  const requestOptions = Object.assign(
    {
      method: 'GET',
      timeout: 10000,
      json: true
    },
    url.parse(uri),
    options
  );

  const req = http.request(requestOptions, res => {
    res.setEncoding('utf8');

    let body = '';
    res.on('data', chunk => (body += chunk));
    res.on('end', () => {
      // Raw-text mode for endpoints that do not return JSON (e.g. IMDS tokens).
      if (requestOptions.json === false) {
        callback(undefined, body);
        return;
      }

      try {
        callback(undefined, JSON.parse(body));
      } catch (err) {
        callback(new MongoError(`Invalid JSON response: "${body}"`));
      }
    });
  });

  req.on('error', err => callback(err));
  req.end();
}

module.exports = MongoDBAWS;
|
||||
28
node_modules/mongodb/lib/core/auth/plain.js
generated
vendored
Normal file
28
node_modules/mongodb/lib/core/auth/plain.js
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
'use strict';
|
||||
const retrieveBSON = require('../connection/utils').retrieveBSON;
|
||||
const AuthProvider = require('./auth_provider').AuthProvider;
|
||||
|
||||
// TODO: can we get the Binary type from this.bson instead?
|
||||
const BSON = retrieveBSON();
|
||||
const Binary = BSON.Binary;
|
||||
|
||||
/**
 * SASL PLAIN authentication provider: a single saslStart carrying
 * "\x00<username>\x00<password>" against the $external database.
 */
class Plain extends AuthProvider {
  /**
   * @param {AuthContext} authContext context for the authentication flow
   * @param {authResultCallback} callback
   */
  auth(authContext, callback) {
    const connection = authContext.connection;
    const credentials = authContext.credentials;
    const username = credentials.username;
    const password = credentials.password;

    const payload = new Binary(`\x00${username}\x00${password}`);
    connection.command(
      '$external.$cmd',
      {
        saslStart: 1,
        mechanism: 'PLAIN',
        payload: payload,
        autoAuthorize: 1
      },
      callback
    );
  }
}

module.exports = Plain;
|
||||
346
node_modules/mongodb/lib/core/auth/scram.js
generated
vendored
Normal file
346
node_modules/mongodb/lib/core/auth/scram.js
generated
vendored
Normal file
@@ -0,0 +1,346 @@
|
||||
'use strict';
|
||||
const crypto = require('crypto');
|
||||
const Buffer = require('safe-buffer').Buffer;
|
||||
const retrieveBSON = require('../connection/utils').retrieveBSON;
|
||||
const MongoError = require('../error').MongoError;
|
||||
const AuthProvider = require('./auth_provider').AuthProvider;
|
||||
|
||||
const BSON = retrieveBSON();
|
||||
const Binary = BSON.Binary;
|
||||
|
||||
let saslprep;
|
||||
try {
|
||||
saslprep = require('saslprep');
|
||||
} catch (e) {
|
||||
// don't do anything;
|
||||
}
|
||||
|
||||
/**
 * Shared SCRAM provider; concrete subclasses pick the hash ('sha1'/'sha256').
 */
class ScramSHA extends AuthProvider {
  /**
   * @param {BSON} bson BSON library handed to AuthProvider
   * @param {string} [cryptoMethod] hash to use; defaults to 'sha1'
   */
  constructor(bson, cryptoMethod) {
    super(bson);
    this.cryptoMethod = cryptoMethod || 'sha1';
  }

  /**
   * Attach a speculativeAuthenticate document (the SCRAM client-first
   * message) to the handshake, and store the generated nonce on the context.
   *
   * @param {object} handshakeDoc document used for the initial handshake
   * @param {AuthContext} authContext context for the authentication flow
   * @param {function} callback invoked with (error, handshakeDoc)
   */
  prepare(handshakeDoc, authContext, callback) {
    const cryptoMethod = this.cryptoMethod;
    if (cryptoMethod === 'sha256' && saslprep == null) {
      console.warn('Warning: no saslprep library specified. Passwords will not be sanitized');
    }

    crypto.randomBytes(24, (err, nonce) => {
      if (err) return callback(err);

      // store the nonce for later use
      Object.assign(authContext, { nonce });

      const credentials = authContext.credentials;
      const firstMessage = Object.assign(makeFirstMessage(cryptoMethod, credentials, nonce), {
        db: credentials.source
      });

      callback(
        undefined,
        Object.assign({}, handshakeDoc, { speculativeAuthenticate: firstMessage })
      );
    });
  }

  /**
   * Run (or finish) the SCRAM conversation. When the handshake already
   * answered our speculative saslStart, continue from that response;
   * otherwise start the conversation from scratch.
   *
   * @param {AuthContext} authContext shared context for the authentication flow
   * @param {authResultCallback} callback
   */
  auth(authContext, callback) {
    const response = authContext.response;
    if (response && response.speculativeAuthenticate) {
      return continueScramConversation(
        this.cryptoMethod,
        response.speculativeAuthenticate,
        authContext,
        callback
      );
    }

    executeScram(this.cryptoMethod, authContext, callback);
  }
}
|
||||
|
||||
/**
 * Escape a SCRAM username per RFC 5802 saslname rules: every '=' becomes
 * "=3D" and every ',' becomes "=2C".
 *
 * BUGFIX: the previous implementation used String#replace with a string
 * pattern, which replaces only the FIRST occurrence of each character and
 * produced an invalid saslname for usernames containing repeats.
 *
 * @param {string} username raw username
 * @returns {string} escaped saslname
 */
function cleanUsername(username) {
  // Escape '=' first so the '=' introduced by '=2C' is not double-escaped.
  return username.replace(/=/g, '=3D').replace(/,/g, '=2C');
}
|
||||
|
||||
/**
 * Build the SCRAM client-first-message-bare: "n=<user>,r=<nonce-base64>".
 *
 * Each piece is encoded as UTF-8 explicitly because JavaScript strings are
 * UTF-16 while the server hashes UTF-8, and the username here has not been
 * sasl-prep'd.
 *
 * @param {string} username escaped username
 * @param {Buffer} nonce client nonce
 * @returns {Buffer} the message bytes
 */
function clientFirstMessageBare(username, nonce) {
  const parts = ['n=', username, ',r=', nonce.toString('base64')];
  return Buffer.concat(parts.map(part => Buffer.from(part, 'utf8')));
}
|
||||
|
||||
/**
 * Build the saslStart command carrying the SCRAM client-first message.
 *
 * @param {string} cryptoMethod 'sha1' or 'sha256'
 * @param {MongoCredentials} credentials credentials supplying the username
 * @param {Buffer} nonce client nonce
 * @returns {object} the saslStart command document
 */
function makeFirstMessage(cryptoMethod, credentials, nonce) {
  const username = cleanUsername(credentials.username);
  const mechanism = cryptoMethod === 'sha1' ? 'SCRAM-SHA-1' : 'SCRAM-SHA-256';

  // GS2 header "n,," (no channel binding, no authzid) prefixes the bare message.
  const clientFirst = Buffer.concat([
    Buffer.from('n,,', 'utf8'),
    clientFirstMessageBare(username, nonce)
  ]);

  return {
    saslStart: 1,
    mechanism,
    payload: new Binary(clientFirst),
    autoAuthorize: 1,
    options: { skipEmptyExchange: true }
  };
}
|
||||
|
||||
/**
 * Run a full SCRAM conversation from scratch: send saslStart, then hand the
 * server's reply to continueScramConversation.
 *
 * @param {string} cryptoMethod 'sha1' or 'sha256'
 * @param {AuthContext} authContext shared context (connection, credentials, nonce)
 * @param {function} callback final authentication callback
 */
function executeScram(cryptoMethod, authContext, callback) {
  const connection = authContext.connection;
  const credentials = authContext.credentials;
  const db = credentials.source;

  const saslStartCmd = makeFirstMessage(cryptoMethod, credentials, authContext.nonce);
  connection.command(`${db}.$cmd`, saslStartCmd, (cmdErr, result) => {
    const err = resolveError(cmdErr, result);
    if (err) return callback(err);

    continueScramConversation(cryptoMethod, result.result, authContext, callback);
  });
}
|
||||
|
||||
/**
 * Complete a SCRAM conversation given the server's reply to saslStart
 * (`response`): derive the client proof, send saslContinue, verify the
 * server signature, and finish with an empty exchange when the server
 * reports it is not yet done.
 *
 * @param {string} cryptoMethod 'sha1' or 'sha256'
 * @param {object} response server reply to saslStart (conversationId + payload)
 * @param {AuthContext} authContext shared context (connection, credentials, nonce)
 * @param {function} callback final authentication callback
 */
function continueScramConversation(cryptoMethod, response, authContext, callback) {
  const connection = authContext.connection;
  const credentials = authContext.credentials;
  const nonce = authContext.nonce;

  const db = credentials.source;
  const username = cleanUsername(credentials.username);
  const password = credentials.password;

  // SHA-256 uses the SASLprep'd password (falls back to the raw password if
  // the optional saslprep module is missing); SHA-1 uses the legacy
  // MONGODB-CR digest of the password instead.
  let processedPassword;
  if (cryptoMethod === 'sha256') {
    processedPassword = saslprep ? saslprep(password) : password;
  } else {
    try {
      processedPassword = passwordDigest(username, password);
    } catch (e) {
      return callback(e);
    }
  }

  // Normalize the server payload to something exposing `.value()`.
  const payload = Buffer.isBuffer(response.payload)
    ? new Binary(response.payload)
    : response.payload;
  const dict = parsePayload(payload.value());

  // Reject servers advertising fewer than 4096 PBKDF2 iterations.
  const iterations = parseInt(dict.i, 10);
  if (iterations && iterations < 4096) {
    callback(new MongoError(`Server returned an invalid iteration count ${iterations}`), false);
    return;
  }

  const salt = dict.s;
  const rnonce = dict.r;
  if (rnonce.startsWith('nonce')) {
    callback(new MongoError(`Server returned an invalid nonce: ${rnonce}`), false);
    return;
  }

  // Set up start of proof: "c=biws" is base64("n,,"), the GS2 header.
  const withoutProof = `c=biws,r=${rnonce}`;
  const saltedPassword = HI(
    processedPassword,
    Buffer.from(salt, 'base64'),
    iterations,
    cryptoMethod
  );

  const clientKey = HMAC(cryptoMethod, saltedPassword, 'Client Key');
  const serverKey = HMAC(cryptoMethod, saltedPassword, 'Server Key');
  const storedKey = H(cryptoMethod, clientKey);
  const authMessage = [
    clientFirstMessageBare(username, nonce),
    payload.value().toString('base64'),
    withoutProof
  ].join(',');

  const clientSignature = HMAC(cryptoMethod, storedKey, authMessage);
  const clientProof = `p=${xor(clientKey, clientSignature)}`;
  const clientFinal = [withoutProof, clientProof].join(',');

  // The signature we expect the server to send back, proving it also knows
  // the password-derived keys.
  const serverSignature = HMAC(cryptoMethod, serverKey, authMessage);
  const saslContinueCmd = {
    saslContinue: 1,
    conversationId: response.conversationId,
    payload: new Binary(Buffer.from(clientFinal))
  };

  connection.command(`${db}.$cmd`, saslContinueCmd, (_err, result) => {
    const err = resolveError(_err, result);
    if (err) {
      return callback(err);
    }

    const r = result.result;
    const parsedResponse = parsePayload(r.payload.value());
    // compareDigest uses crypto.timingSafeEqual when available, guarding the
    // signature check against timing attacks.
    if (!compareDigest(Buffer.from(parsedResponse.v, 'base64'), serverSignature)) {
      callback(new MongoError('Server returned an invalid signature'));
      return;
    }

    // Conversation complete (`err` is necessarily undefined at this point).
    if (!r || r.done !== false) {
      return callback(err, r);
    }

    // Server expects one final empty exchange before marking auth done.
    const retrySaslContinueCmd = {
      saslContinue: 1,
      conversationId: r.conversationId,
      payload: Buffer.alloc(0)
    };

    connection.command(`${db}.$cmd`, retrySaslContinueCmd, callback);
  });
}
|
||||
|
||||
/**
 * Parse a SCRAM payload of the form "k1=v1,k2=v2,..." into an object.
 *
 * BUGFIX: values are split at the FIRST '=' only. SCRAM values such as the
 * salt (`s=...`) and server signature (`v=...`) are base64 and may contain
 * '=' padding, which the previous `split('=')` silently truncated.
 *
 * @param {string} payload comma-separated attr=value pairs
 * @returns {object} mapping of attribute name to its (unmodified) value
 */
function parsePayload(payload) {
  const dict = {};
  const parts = payload.split(',');
  for (let i = 0; i < parts.length; i++) {
    const part = parts[i];
    const idx = part.indexOf('=');
    if (idx < 0) {
      // No '=' at all — preserve the old behavior (key with undefined value).
      dict[part] = undefined;
      continue;
    }

    dict[part.slice(0, idx)] = part.slice(idx + 1);
  }

  return dict;
}
|
||||
|
||||
/**
 * Compute the legacy MONGODB-CR password digest:
 * md5("<username>:mongo:<password>") as a hex string.
 *
 * @param {string} username
 * @param {string} password non-empty password
 * @returns {string} 32-character hex digest
 * @throws {MongoError} when inputs are not strings or the password is empty
 */
function passwordDigest(username, password) {
  if (typeof username !== 'string') throw new MongoError('username must be a string');
  if (typeof password !== 'string') throw new MongoError('password must be a string');
  if (password.length === 0) throw new MongoError('password cannot be empty');

  return crypto
    .createHash('md5')
    .update(`${username}:mongo:${password}`, 'utf8')
    .digest('hex');
}
|
||||
|
||||
/**
 * XOR two byte sequences and return the result base64-encoded.
 * Non-Buffer inputs are converted first; when lengths differ the shorter
 * input is effectively zero-extended (out-of-range reads coerce to 0).
 *
 * @param {Buffer|string} a
 * @param {Buffer|string} b
 * @returns {string} base64 of the XOR result
 */
function xor(a, b) {
  const left = Buffer.isBuffer(a) ? a : Buffer.from(a);
  const right = Buffer.isBuffer(b) ? b : Buffer.from(b);

  const length = Math.max(left.length, right.length);
  const out = Buffer.alloc(length);
  for (let i = 0; i < length; i += 1) {
    out[i] = left[i] ^ right[i];
  }

  return out.toString('base64');
}
|
||||
|
||||
/**
 * One-shot hash digest of `text` with the given algorithm ('sha1'/'sha256').
 * @returns {Buffer} raw digest bytes
 */
function H(method, text) {
  const hash = crypto.createHash(method);
  hash.update(text);
  return hash.digest();
}
|
||||
|
||||
/** The SCRAM HMAC() primitive: keyed HMAC of `text`, returning a Buffer. */
function HMAC(method, key, text) {
  const hmac = crypto.createHmac(method, key);
  hmac.update(text);
  return hmac.digest();
}
|
||||
|
||||
// Process-wide cache of previously computed PBKDF2 results (see HI below),
// keyed by `${password}_${base64 salt}_${iterations}`. PBKDF2 is deliberately
// expensive, and reconnects reuse the same credentials, so memoizing pays off.
let _hiCache = {};
// Number of entries currently in _hiCache; HI() uses it to bound growth.
let _hiCacheCount = 0;
// Drop the entire cache; called by HI() once the entry count passes its cap.
function _hiCachePurge() {
  _hiCache = {};
  _hiCacheCount = 0;
}
|
||||
|
||||
// Derived-key length in bytes for each supported PBKDF2 digest:
// SHA-256 produces 32-byte keys, SHA-1 produces 20-byte keys.
const hiLengthMap = {
  sha256: 32,
  sha1: 20
};
|
||||
|
||||
/**
 * The SCRAM Hi() function: PBKDF2 of `data` (the digested password) with
 * `salt` over `iterations` rounds, using `cryptoMethod` ('sha1' or 'sha256')
 * as the PRF. Results are memoized in the module-level `_hiCache`, which is
 * flushed wholesale once it reaches 200 entries to prevent unbounded growth.
 */
function HI(data, salt, iterations, cryptoMethod) {
  // The memoization key covers every input that affects the derived key.
  const cacheKey = [data, salt.toString('base64'), iterations].join('_');
  const cached = _hiCache[cacheKey];
  if (cached !== undefined) {
    return cached;
  }

  const saltedData = crypto.pbkdf2Sync(
    data,
    salt,
    iterations,
    hiLengthMap[cryptoMethod],
    cryptoMethod
  );

  // Bound the cache before inserting the freshly computed entry.
  if (_hiCacheCount >= 200) {
    _hiCachePurge();
  }
  _hiCache[cacheKey] = saltedData;
  _hiCacheCount += 1;
  return saltedData;
}
|
||||
|
||||
/**
 * Constant-time comparison of two digests of equal length.
 * Prefers crypto.timingSafeEqual; otherwise falls back to a manual
 * XOR-accumulate loop that never exits early, to avoid timing leaks.
 *
 * @param {Buffer} lhs
 * @param {Buffer} rhs
 * @returns {boolean} true when both buffers hold identical bytes
 */
function compareDigest(lhs, rhs) {
  if (lhs.length !== rhs.length) {
    return false;
  }

  if (typeof crypto.timingSafeEqual === 'function') {
    return crypto.timingSafeEqual(lhs, rhs);
  }

  // Fold every byte difference into one accumulator; zero means equal.
  let diff = 0;
  for (const [i, byte] of lhs.entries()) {
    diff |= byte ^ rhs[i];
  }

  return diff === 0;
}
|
||||
|
||||
/**
 * Normalize a command outcome into an error value.
 * Returns the transport error if present, a MongoError when the server
 * response carries `$err`/`errmsg`, and undefined on success.
 */
function resolveError(err, result) {
  if (err) {
    return err;
  }

  const response = result.result;
  if (response.$err || response.errmsg) {
    return new MongoError(response);
  }
}
|
||||
|
||||
// SCRAM-SHA-1 mechanism: the generic ScramSHA flow specialized to 'sha1'.
class ScramSHA1 extends ScramSHA {
  constructor(bson) {
    super(bson, 'sha1');
  }
}
|
||||
|
||||
// SCRAM-SHA-256 mechanism: the generic ScramSHA flow specialized to 'sha256'.
class ScramSHA256 extends ScramSHA {
  constructor(bson) {
    super(bson, 'sha256');
  }
}
|
||||
|
||||
// Public surface of this module: the two SCRAM mechanism providers.
module.exports = { ScramSHA1, ScramSHA256 };
|
||||
35
node_modules/mongodb/lib/core/auth/x509.js
generated
vendored
Normal file
35
node_modules/mongodb/lib/core/auth/x509.js
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
'use strict';
|
||||
const AuthProvider = require('./auth_provider').AuthProvider;
|
||||
|
||||
/**
 * MONGODB-X509 authentication provider. The client is identified by its TLS
 * client certificate, so no password exchange takes place here.
 */
class X509 extends AuthProvider {
  /**
   * Attach a speculative `authenticate` command to the initial handshake
   * document so authentication can complete in the same round trip when the
   * server honors speculativeAuthenticate.
   *
   * @param {object} handshakeDoc the handshake document being built
   * @param {AuthContext} authContext shared authentication context
   * @param {function} callback invoked with (undefined, handshakeDoc)
   */
  prepare(handshakeDoc, authContext, callback) {
    const credentials = authContext.credentials;
    Object.assign(handshakeDoc, {
      speculativeAuthenticate: x509AuthenticateCommand(credentials)
    });

    callback(undefined, handshakeDoc);
  }

  /**
   * Run the `authenticate` command against `$external`, unless the handshake
   * response shows speculative authentication already succeeded (in which
   * case there is nothing left to do).
   *
   * @param {AuthContext} authContext shared authentication context
   * @param {function} callback completion callback
   */
  auth(authContext, callback) {
    const connection = authContext.connection;
    const credentials = authContext.credentials;
    const response = authContext.response;
    if (response.speculativeAuthenticate) {
      return callback();
    }

    connection.command('$external.$cmd', x509AuthenticateCommand(credentials), callback);
  }
}
|
||||
|
||||
/**
 * Build the `authenticate` command document for MONGODB-X509.
 *
 * Bug fix: the original called `Object.apply(command, {...})`, which is
 * `Function.prototype.apply` on the `Object` constructor — the second
 * argument is not array-like, so it contributes no arguments and the
 * properties are silently discarded; `user` was never added to the command.
 * `Object.assign` is the intended call.
 *
 * @param {MongoCredentials} credentials credentials for authentication
 * @returns {object} the command, including `user` when a username was supplied
 */
function x509AuthenticateCommand(credentials) {
  const command = { authenticate: 1, mechanism: 'MONGODB-X509' };
  if (credentials.username) {
    Object.assign(command, { user: credentials.username });
  }

  return command;
}
|
||||
|
||||
// The X509 provider is this module's sole export.
module.exports = X509;
|
||||
251
node_modules/mongodb/lib/core/connection/apm.js
generated
vendored
Normal file
251
node_modules/mongodb/lib/core/connection/apm.js
generated
vendored
Normal file
@@ -0,0 +1,251 @@
|
||||
'use strict';
|
||||
const Msg = require('../connection/msg').Msg;
|
||||
const KillCursor = require('../connection/commands').KillCursor;
|
||||
const GetMore = require('../connection/commands').GetMore;
|
||||
const calculateDurationInMs = require('../../utils').calculateDurationInMs;
|
||||
|
||||
/** Commands whose payloads are redacted from monitoring events because they can carry credentials. */
const SENSITIVE_COMMANDS = new Set([
  'authenticate',
  'saslStart',
  'saslContinue',
  'getnonce',
  'createUser',
  'updateUser',
  'copydbgetnonce',
  'copydbsaslstart',
  'copydb'
]);

// Small helpers shared by the event classes below.

// By wire-protocol convention the command name is the first key of the document.
const extractCommandName = commandDoc => Object.keys(commandDoc)[0];
// Full "db.collection" namespace of a legacy wire-protocol message.
const namespace = command => command.ns;
// Database portion of the namespace.
const databaseName = command => {
  const dotIndexed = command.ns.split('.');
  return dotIndexed[0];
};
// Collection portion of the namespace (first segment after the database).
const collectionName = command => {
  const dotIndexed = command.ns.split('.');
  return dotIndexed[1];
};
// Legacy pools expose host/port options; modern connections carry an address.
const generateConnectionId = pool => {
  if (pool.options) {
    return `${pool.options.host}:${pool.options.port}`;
  }
  return pool.address;
};
// Replace the payload of sensitive commands with an empty document.
const maybeRedact = (commandName, result) => (SENSITIVE_COMMANDS.has(commandName) ? {} : result);
// The legacy Pool implementation has an `s` state bag and a `queue`.
const isLegacyPool = pool => pool.s && pool.queue;
|
||||
|
||||
// Map of legacy OP_QUERY $-modifiers to their modern `find` command option
// names; used by extractCommand when upconverting for monitoring events.
const LEGACY_FIND_QUERY_MAP = {
  $query: 'filter',
  $orderby: 'sort',
  $hint: 'hint',
  $comment: 'comment',
  $maxScan: 'maxScan',
  $max: 'max',
  $min: 'min',
  $returnKey: 'returnKey',
  $showDiskLoc: 'showRecordId',
  $maxTimeMS: 'maxTimeMS',
  $snapshot: 'snapshot'
};

// Map of OP_QUERY wire fields to modern `find` command option names.
const LEGACY_FIND_OPTIONS_MAP = {
  numberToSkip: 'skip',
  numberToReturn: 'batchSize',
  returnFieldsSelector: 'projection'
};

// OP_QUERY boolean flags that are copied verbatim onto the upconverted command.
const OP_QUERY_KEYS = [
  'tailable',
  'oplogReplay',
  'noCursorTimeout',
  'awaitData',
  'partial',
  'exhaust'
];
|
||||
|
||||
/**
 * Extract the actual command document from a wire-protocol message for
 * monitoring, upconverting legacy formats (GetMore, KillCursor, OP_QUERY
 * finds and $cmd invocations) into their modern command equivalents.
 *
 * @param {Object} command the outbound message (Msg, GetMore, KillCursor, or legacy Query)
 * @returns {Object} a command document suitable for a CommandStartedEvent
 */
const extractCommand = command => {
  // Legacy getMore / killCursors messages become their command forms.
  if (command instanceof GetMore) {
    return {
      getMore: command.cursorId,
      collection: collectionName(command),
      batchSize: command.numberToReturn
    };
  }

  if (command instanceof KillCursor) {
    return {
      killCursors: collectionName(command),
      cursors: command.cursorIds
    };
  }

  // OP_MSG already carries the command document directly.
  if (command instanceof Msg) {
    return command.command;
  }

  // Legacy OP_QUERY with $-modifiers: upconvert.
  if (command.query && command.query.$query) {
    let result;
    if (command.ns === 'admin.$cmd') {
      // upconvert legacy command invocation ({$query: {cmd...}} on admin.$cmd)
      result = Object.assign({}, command.query.$query);
    } else {
      // upconvert legacy find command: translate each $-modifier to its
      // modern option name
      result = { find: collectionName(command) };
      Object.keys(LEGACY_FIND_QUERY_MAP).forEach(key => {
        if (typeof command.query[key] !== 'undefined')
          result[LEGACY_FIND_QUERY_MAP[key]] = command.query[key];
      });
    }

    // Wire-level fields (skip/batchSize/projection) map onto find options.
    Object.keys(LEGACY_FIND_OPTIONS_MAP).forEach(key => {
      if (typeof command[key] !== 'undefined') result[LEGACY_FIND_OPTIONS_MAP[key]] = command[key];
    });

    // Boolean cursor flags are copied through under the same names.
    OP_QUERY_KEYS.forEach(key => {
      if (command[key]) result[key] = command[key];
    });

    if (typeof command.pre32Limit !== 'undefined') {
      result.limit = command.pre32Limit;
    }

    // $explain wraps the whole upconverted command.
    if (command.query.$explain) {
      return { explain: result };
    }

    return result;
  }

  // Plain OP_QUERY command (no $-modifiers): the query IS the command.
  return command.query ? command.query : command;
};
|
||||
|
||||
/**
 * Build the reply document reported on a CommandSucceededEvent, synthesizing
 * modern-style cursor replies for legacy wire-protocol operations.
 *
 * @param {Object} command the originating message
 * @param {Object} reply the raw server reply
 * @returns {Object} a command-monitoring reply document
 */
const extractReply = (command, reply) => {
  if (command instanceof KillCursor) {
    return {
      ok: 1,
      cursorsUnknown: command.cursorIds
    };
  }

  if (command instanceof GetMore) {
    return {
      ok: 1,
      cursor: {
        id: reply.message.cursorId,
        ns: namespace(command),
        nextBatch: reply.message.documents
      }
    };
  }

  // A legacy find ($query present) gets a synthesized first-batch cursor reply.
  if (command.query && typeof command.query.$query !== 'undefined') {
    return {
      ok: 1,
      cursor: {
        id: reply.message.cursorId,
        ns: namespace(command),
        firstBatch: reply.message.documents
      }
    };
  }

  // Otherwise surface the parsed result document when available.
  return reply && reply.result ? reply.result : reply;
};
|
||||
|
||||
/**
 * Derive the connection-identifying fields attached to every monitoring
 * event, handling both the legacy Pool and the modern Connection.
 */
const extractConnectionDetails = pool => {
  if (isLegacyPool(pool)) {
    return { connectionId: generateConnectionId(pool) };
  }

  // APM in the modern pool happens at the `Connection` level, so the `pool`
  // argument is actually a Connection; rename for readability.
  const connection = pool;
  return {
    address: connection.address,
    connectionId: connection.id
  };
};
|
||||
|
||||
/** An event indicating the start of a given command */
class CommandStartedEvent {
  /**
   * Create a started event
   *
   * @param {Pool} pool the pool (or modern Connection) that originated the command
   * @param {Object} command the command
   */
  constructor(pool, command) {
    const cmd = extractCommand(command);
    const commandName = extractCommandName(cmd);
    const connectionDetails = extractConnectionDetails(pool);

    // NOTE: remove in major revision, this is not spec behavior
    // (exposes `{ <name>: true }` for sensitive commands instead of omitting them)
    if (SENSITIVE_COMMANDS.has(commandName)) {
      this.commandObj = {};
      this.commandObj[commandName] = true;
    }

    Object.assign(this, connectionDetails, {
      requestId: command.requestId,
      databaseName: databaseName(command),
      commandName,
      command: cmd
    });
  }
}
|
||||
|
||||
/** An event indicating the success of a given command */
class CommandSucceededEvent {
  /**
   * Create a succeeded event
   *
   * @param {Pool} pool the pool (or modern Connection) that originated the command
   * @param {Object} command the command
   * @param {Object} reply the reply for this command from the server
   * @param {Array} started a high resolution tuple timestamp of when the command was first sent, to calculate duration
   */
  constructor(pool, command, reply, started) {
    const cmd = extractCommand(command);
    const commandName = extractCommandName(cmd);
    const connectionDetails = extractConnectionDetails(pool);

    Object.assign(this, connectionDetails, {
      requestId: command.requestId,
      commandName,
      duration: calculateDurationInMs(started),
      // Sensitive command replies are replaced with an empty document.
      reply: maybeRedact(commandName, extractReply(command, reply))
    });
  }
}
|
||||
|
||||
/** An event indicating the failure of a given command */
class CommandFailedEvent {
  /**
   * Create a failure event
   *
   * @param {Pool} pool the pool (or modern Connection) that originated the command
   * @param {Object} command the command
   * @param {MongoError|Object} error the generated error or a server error response
   * @param {Array} started a high resolution tuple timestamp of when the command was first sent, to calculate duration
   */
  constructor(pool, command, error, started) {
    const cmd = extractCommand(command);
    const commandName = extractCommandName(cmd);
    const connectionDetails = extractConnectionDetails(pool);

    Object.assign(this, connectionDetails, {
      requestId: command.requestId,
      commandName,
      duration: calculateDurationInMs(started),
      // Failures for sensitive commands are redacted like replies.
      failure: maybeRedact(commandName, error)
    });
  }
}
|
||||
|
||||
// Public API: the three command-monitoring event classes.
module.exports = {
  CommandStartedEvent,
  CommandSucceededEvent,
  CommandFailedEvent
};
|
||||
36
node_modules/mongodb/lib/core/connection/command_result.js
generated
vendored
Normal file
36
node_modules/mongodb/lib/core/connection/command_result.js
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
'use strict';
|
||||
|
||||
/**
 * Wraps a server command response together with the connection it arrived on
 * and the parsed wire-protocol message.
 *
 * @class
 * @param {object} result the server's command response document
 * @param {Connection} connection the connection associated with this result
 * @param {object} message the parsed wire-protocol message
 * @return {CommandResult} A cursor instance
 */
var CommandResult = function(result, connection, message) {
  this.result = result;
  this.connection = connection;
  this.message = message;
};

/**
 * JSON representation: the instance's own fields merged with the flattened
 * response document, minus the (potentially large) raw message.
 *
 * @method
 * @return {object}
 */
CommandResult.prototype.toJSON = function() {
  const merged = Object.assign({}, this, this.result);
  delete merged.message;
  return merged;
};

/**
 * String representation via JSON serialization of toJSON().
 *
 * @method
 * @return {string}
 */
CommandResult.prototype.toString = function() {
  return JSON.stringify(this.toJSON());
};
|
||||
|
||||
// CommandResult is this module's sole export.
module.exports = CommandResult;
|
||||
507
node_modules/mongodb/lib/core/connection/commands.js
generated
vendored
Normal file
507
node_modules/mongodb/lib/core/connection/commands.js
generated
vendored
Normal file
@@ -0,0 +1,507 @@
|
||||
'use strict';
|
||||
|
||||
var retrieveBSON = require('./utils').retrieveBSON;
|
||||
var BSON = retrieveBSON();
|
||||
var Long = BSON.Long;
|
||||
const Buffer = require('safe-buffer').Buffer;
|
||||
|
||||
// Incrementing request id
// Module-wide counter shared by every message built here, so replies can be
// correlated with their originating request.
var _requestId = 0;

// Wire command operation ids
var opcodes = require('../wireprotocol/shared').opcodes;

// Query flags
// Bit values for the OP_QUERY `flags` int32 (see the MongoDB wire protocol).
var OPTS_TAILABLE_CURSOR = 2;
var OPTS_SLAVE = 4;
var OPTS_OPLOG_REPLAY = 8;
var OPTS_NO_CURSOR_TIMEOUT = 16;
var OPTS_AWAIT_DATA = 32;
var OPTS_EXHAUST = 64;
var OPTS_PARTIAL = 128;

// Response flags
// Bit values for the OP_REPLY `responseFlags` int32.
var CURSOR_NOT_FOUND = 1;
var QUERY_FAILURE = 2;
var SHARD_CONFIG_STALE = 4;
var AWAIT_CAPABLE = 8;
||||
|
||||
/**************************************************************
 * QUERY
 **************************************************************/
/**
 * An OP_QUERY wire-protocol message.
 *
 * @param {object} bson BSON serializer instance
 * @param {string} ns full "db.collection" namespace (must not contain NUL)
 * @param {object} query the query/command document
 * @param {object} options message options (skip, return counts, projection,
 *   serialization flags, cursor flags, etc.)
 * @throws {Error} when ns/query are missing or ns contains a null character
 */
var Query = function(bson, ns, query, options) {
  var self = this;
  // Basic options needed to be passed in
  if (ns == null) throw new Error('ns must be specified for query');
  if (query == null) throw new Error('query must be specified for query');

  // Validate that we are not passing 0x00 in the collection name
  if (ns.indexOf('\x00') !== -1) {
    throw new Error('namespace cannot contain a null character');
  }

  // Basic options
  this.bson = bson;
  this.ns = ns;
  this.query = query;

  // Additional options
  this.numberToSkip = options.numberToSkip || 0;
  this.numberToReturn = options.numberToReturn || 0;
  this.returnFieldSelector = options.returnFieldSelector || null;
  // Each message consumes a fresh id from the module counter.
  this.requestId = Query.getRequestId();

  // special case for pre-3.2 find commands, delete ASAP
  this.pre32Limit = options.pre32Limit;

  // Serialization option
  this.serializeFunctions =
    typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
  this.ignoreUndefined =
    typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;
  // Default BSON document cap of 16MB.
  this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
  this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : true;
  this.batchSize = self.numberToReturn;

  // Flags (cursor behavior bits; toBin() folds these into the flags int32)
  this.tailable = false;
  this.slaveOk = typeof options.slaveOk === 'boolean' ? options.slaveOk : false;
  this.oplogReplay = false;
  this.noCursorTimeout = false;
  this.awaitData = false;
  this.exhaust = false;
  this.partial = false;
};
|
||||
|
||||
//
// Assign a new request Id
// (post-increment: this.requestId takes the counter's current value and the
// counter advances afterwards)
Query.prototype.incRequestId = function() {
  this.requestId = _requestId++;
};
|
||||
|
||||
//
// Peek at the next request Id without consuming it
Query.nextRequestId = function() {
  return _requestId + 1;
};
|
||||
|
||||
//
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
// Serializes this OP_QUERY message to an array of Buffers:
// [header+flags+ns+skip/return, query BSON, optional projection BSON].
// All integers are written little-endian byte-by-byte.
Query.prototype.toBin = function() {
  var self = this;
  var buffers = [];
  var projection = null;

  // Set up the flags
  var flags = 0;
  if (this.tailable) {
    flags |= OPTS_TAILABLE_CURSOR;
  }

  if (this.slaveOk) {
    flags |= OPTS_SLAVE;
  }

  if (this.oplogReplay) {
    flags |= OPTS_OPLOG_REPLAY;
  }

  if (this.noCursorTimeout) {
    flags |= OPTS_NO_CURSOR_TIMEOUT;
  }

  if (this.awaitData) {
    flags |= OPTS_AWAIT_DATA;
  }

  if (this.exhaust) {
    flags |= OPTS_EXHAUST;
  }

  if (this.partial) {
    flags |= OPTS_PARTIAL;
  }

  // If batchSize is different to self.numberToReturn
  if (self.batchSize !== self.numberToReturn) self.numberToReturn = self.batchSize;

  // Allocate write protocol header buffer
  var header = Buffer.alloc(
    4 * 4 + // Header
    4 + // Flags
      Buffer.byteLength(self.ns) +
      1 + // namespace
      4 + // numberToSkip
      4 // numberToReturn
  );

  // Add header to buffers
  buffers.push(header);

  // Serialize the query
  var query = self.bson.serialize(this.query, {
    checkKeys: this.checkKeys,
    serializeFunctions: this.serializeFunctions,
    ignoreUndefined: this.ignoreUndefined
  });

  // Add query document
  buffers.push(query);

  if (self.returnFieldSelector && Object.keys(self.returnFieldSelector).length > 0) {
    // Serialize the projection document
    projection = self.bson.serialize(this.returnFieldSelector, {
      checkKeys: this.checkKeys,
      serializeFunctions: this.serializeFunctions,
      ignoreUndefined: this.ignoreUndefined
    });
    // Add projection document
    buffers.push(projection);
  }

  // Total message size
  var totalLength = header.length + query.length + (projection ? projection.length : 0);

  // Set up the index
  var index = 4;

  // Write total document length (little-endian int32 at offset 0)
  header[3] = (totalLength >> 24) & 0xff;
  header[2] = (totalLength >> 16) & 0xff;
  header[1] = (totalLength >> 8) & 0xff;
  header[0] = totalLength & 0xff;

  // Write header information requestId
  header[index + 3] = (this.requestId >> 24) & 0xff;
  header[index + 2] = (this.requestId >> 16) & 0xff;
  header[index + 1] = (this.requestId >> 8) & 0xff;
  header[index] = this.requestId & 0xff;
  index = index + 4;

  // Write header information responseTo (always 0 for requests)
  header[index + 3] = (0 >> 24) & 0xff;
  header[index + 2] = (0 >> 16) & 0xff;
  header[index + 1] = (0 >> 8) & 0xff;
  header[index] = 0 & 0xff;
  index = index + 4;

  // Write header information OP_QUERY
  header[index + 3] = (opcodes.OP_QUERY >> 24) & 0xff;
  header[index + 2] = (opcodes.OP_QUERY >> 16) & 0xff;
  header[index + 1] = (opcodes.OP_QUERY >> 8) & 0xff;
  header[index] = opcodes.OP_QUERY & 0xff;
  index = index + 4;

  // Write header information flags
  header[index + 3] = (flags >> 24) & 0xff;
  header[index + 2] = (flags >> 16) & 0xff;
  header[index + 1] = (flags >> 8) & 0xff;
  header[index] = flags & 0xff;
  index = index + 4;

  // Write collection name (cstring: utf8 bytes + NUL terminator)
  index = index + header.write(this.ns, index, 'utf8') + 1;
  header[index - 1] = 0;

  // Write header information flags numberToSkip
  header[index + 3] = (this.numberToSkip >> 24) & 0xff;
  header[index + 2] = (this.numberToSkip >> 16) & 0xff;
  header[index + 1] = (this.numberToSkip >> 8) & 0xff;
  header[index] = this.numberToSkip & 0xff;
  index = index + 4;

  // Write header information flags numberToReturn
  header[index + 3] = (this.numberToReturn >> 24) & 0xff;
  header[index + 2] = (this.numberToReturn >> 16) & 0xff;
  header[index + 1] = (this.numberToReturn >> 8) & 0xff;
  header[index] = this.numberToReturn & 0xff;
  index = index + 4;

  // Return the buffers
  return buffers;
};
|
||||
|
||||
// Allocate and return the next request id (pre-increment, so ids start at 1).
Query.getRequestId = function() {
  return ++_requestId;
};
|
||||
|
||||
/**************************************************************
 * GETMORE
 **************************************************************/
// OP_GETMORE message: requests the next batch from an open server-side cursor.
// `cursorId` is a BSON Long (toBin reads its low/high 32-bit halves).
var GetMore = function(bson, ns, cursorId, opts) {
  opts = opts || {};
  this.numberToReturn = opts.numberToReturn || 0;
  this.requestId = _requestId++;
  this.bson = bson;
  this.ns = ns;
  this.cursorId = cursorId;
};
|
||||
|
||||
//
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
// Serializes this OP_GETMORE message into one Buffer; integers are written
// little-endian byte-by-byte, and the 64-bit cursor id as two int32 halves.
GetMore.prototype.toBin = function() {
  // ZERO(4) + cstring ns + numberToReturn(4) + cursorId(8) + standard header(16)
  var length = 4 + Buffer.byteLength(this.ns) + 1 + 4 + 8 + 4 * 4;
  // Create command buffer
  var index = 0;
  // Allocate buffer
  var _buffer = Buffer.alloc(length);

  // Write header information
  // index = write32bit(index, _buffer, length);
  _buffer[index + 3] = (length >> 24) & 0xff;
  _buffer[index + 2] = (length >> 16) & 0xff;
  _buffer[index + 1] = (length >> 8) & 0xff;
  _buffer[index] = length & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, requestId);
  _buffer[index + 3] = (this.requestId >> 24) & 0xff;
  _buffer[index + 2] = (this.requestId >> 16) & 0xff;
  _buffer[index + 1] = (this.requestId >> 8) & 0xff;
  _buffer[index] = this.requestId & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, 0);  (responseTo)
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = 0 & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, OP_GETMORE);
  _buffer[index + 3] = (opcodes.OP_GETMORE >> 24) & 0xff;
  _buffer[index + 2] = (opcodes.OP_GETMORE >> 16) & 0xff;
  _buffer[index + 1] = (opcodes.OP_GETMORE >> 8) & 0xff;
  _buffer[index] = opcodes.OP_GETMORE & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, 0);  (reserved ZERO field)
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = 0 & 0xff;
  index = index + 4;

  // Write collection name (cstring)
  index = index + _buffer.write(this.ns, index, 'utf8') + 1;
  _buffer[index - 1] = 0;

  // Write batch size
  // index = write32bit(index, _buffer, numberToReturn);
  _buffer[index + 3] = (this.numberToReturn >> 24) & 0xff;
  _buffer[index + 2] = (this.numberToReturn >> 16) & 0xff;
  _buffer[index + 1] = (this.numberToReturn >> 8) & 0xff;
  _buffer[index] = this.numberToReturn & 0xff;
  index = index + 4;

  // Write cursor id (low half first, then high half)
  // index = write32bit(index, _buffer, cursorId.getLowBits());
  _buffer[index + 3] = (this.cursorId.getLowBits() >> 24) & 0xff;
  _buffer[index + 2] = (this.cursorId.getLowBits() >> 16) & 0xff;
  _buffer[index + 1] = (this.cursorId.getLowBits() >> 8) & 0xff;
  _buffer[index] = this.cursorId.getLowBits() & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, cursorId.getHighBits());
  _buffer[index + 3] = (this.cursorId.getHighBits() >> 24) & 0xff;
  _buffer[index + 2] = (this.cursorId.getHighBits() >> 16) & 0xff;
  _buffer[index + 1] = (this.cursorId.getHighBits() >> 8) & 0xff;
  _buffer[index] = this.cursorId.getHighBits() & 0xff;
  index = index + 4;

  // Return buffer
  return _buffer;
};
|
||||
|
||||
/**************************************************************
 * KILLCURSOR
 **************************************************************/
// OP_KILL_CURSORS message: tells the server to close the given cursor ids.
// Note: `bson` is accepted for signature symmetry with the other message
// types but is neither stored nor used.
var KillCursor = function(bson, ns, cursorIds) {
  this.ns = ns;
  this.requestId = _requestId++;
  this.cursorIds = cursorIds;
};
|
||||
|
||||
//
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
// Serializes this OP_KILL_CURSORS message into one Buffer; integers are
// written little-endian byte-by-byte, each 64-bit cursor id as two int32s.
KillCursor.prototype.toBin = function() {
  // ZERO(4) + count(4) + standard header(16) + 8 bytes per cursor id
  var length = 4 + 4 + 4 * 4 + this.cursorIds.length * 8;

  // Create command buffer
  var index = 0;
  var _buffer = Buffer.alloc(length);

  // Write header information
  // index = write32bit(index, _buffer, length);
  _buffer[index + 3] = (length >> 24) & 0xff;
  _buffer[index + 2] = (length >> 16) & 0xff;
  _buffer[index + 1] = (length >> 8) & 0xff;
  _buffer[index] = length & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, requestId);
  _buffer[index + 3] = (this.requestId >> 24) & 0xff;
  _buffer[index + 2] = (this.requestId >> 16) & 0xff;
  _buffer[index + 1] = (this.requestId >> 8) & 0xff;
  _buffer[index] = this.requestId & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, 0);  (responseTo)
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = 0 & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, OP_KILL_CURSORS);
  _buffer[index + 3] = (opcodes.OP_KILL_CURSORS >> 24) & 0xff;
  _buffer[index + 2] = (opcodes.OP_KILL_CURSORS >> 16) & 0xff;
  _buffer[index + 1] = (opcodes.OP_KILL_CURSORS >> 8) & 0xff;
  _buffer[index] = opcodes.OP_KILL_CURSORS & 0xff;
  index = index + 4;

  // index = write32bit(index, _buffer, 0);  (reserved ZERO field)
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = 0 & 0xff;
  index = index + 4;

  // Write batch size (number of cursor ids that follow)
  // index = write32bit(index, _buffer, this.cursorIds.length);
  _buffer[index + 3] = (this.cursorIds.length >> 24) & 0xff;
  _buffer[index + 2] = (this.cursorIds.length >> 16) & 0xff;
  _buffer[index + 1] = (this.cursorIds.length >> 8) & 0xff;
  _buffer[index] = this.cursorIds.length & 0xff;
  index = index + 4;

  // Write all the cursor ids into the array
  for (var i = 0; i < this.cursorIds.length; i++) {
    // Write cursor id (low half first, then high half)
    // index = write32bit(index, _buffer, cursorIds[i].getLowBits());
    _buffer[index + 3] = (this.cursorIds[i].getLowBits() >> 24) & 0xff;
    _buffer[index + 2] = (this.cursorIds[i].getLowBits() >> 16) & 0xff;
    _buffer[index + 1] = (this.cursorIds[i].getLowBits() >> 8) & 0xff;
    _buffer[index] = this.cursorIds[i].getLowBits() & 0xff;
    index = index + 4;

    // index = write32bit(index, _buffer, cursorIds[i].getHighBits());
    _buffer[index + 3] = (this.cursorIds[i].getHighBits() >> 24) & 0xff;
    _buffer[index + 2] = (this.cursorIds[i].getHighBits() >> 16) & 0xff;
    _buffer[index + 1] = (this.cursorIds[i].getHighBits() >> 8) & 0xff;
    _buffer[index] = this.cursorIds[i].getHighBits() & 0xff;
    index = index + 4;
  }

  // Return buffer
  return _buffer;
};
|
||||
|
||||
// Wraps a raw OP_REPLY message. Header fields and the reply preamble (flags,
// cursor id, startingFrom, numberReturned) are decoded eagerly; the BSON
// documents themselves are decoded lazily via parse().
//
// @param {object} bson BSON deserializer
// @param {Buffer} message the full raw message
// @param {object} msgHeader pre-parsed header (length/requestId/responseTo/opCode/fromCompressed)
// @param {Buffer} msgBody the message body after the header
// @param {object} [opts] promoteLongs/promoteValues/promoteBuffers defaults for parse()
var Response = function(bson, message, msgHeader, msgBody, opts) {
  opts = opts || { promoteLongs: true, promoteValues: true, promoteBuffers: false };
  this.parsed = false;
  this.raw = message;
  this.data = msgBody;
  this.bson = bson;
  this.opts = opts;

  // Read the message header
  this.length = msgHeader.length;
  this.requestId = msgHeader.requestId;
  this.responseTo = msgHeader.responseTo;
  this.opCode = msgHeader.opCode;
  this.fromCompressed = msgHeader.fromCompressed;

  // Read the message body (OP_REPLY preamble, little-endian)
  this.responseFlags = msgBody.readInt32LE(0);
  // 64-bit cursor id stored as low int32 at offset 4, high int32 at offset 8.
  this.cursorId = new Long(msgBody.readInt32LE(4), msgBody.readInt32LE(8));
  this.startingFrom = msgBody.readInt32LE(12);
  this.numberReturned = msgBody.readInt32LE(16);

  // Preallocate document array
  this.documents = new Array(this.numberReturned);

  // Flag values (decoded from the responseFlags bitfield)
  this.cursorNotFound = (this.responseFlags & CURSOR_NOT_FOUND) !== 0;
  this.queryFailure = (this.responseFlags & QUERY_FAILURE) !== 0;
  this.shardConfigStale = (this.responseFlags & SHARD_CONFIG_STALE) !== 0;
  this.awaitCapable = (this.responseFlags & AWAIT_CAPABLE) !== 0;
  this.promoteLongs = typeof opts.promoteLongs === 'boolean' ? opts.promoteLongs : true;
  this.promoteValues = typeof opts.promoteValues === 'boolean' ? opts.promoteValues : true;
  this.promoteBuffers = typeof opts.promoteBuffers === 'boolean' ? opts.promoteBuffers : false;
};
|
||||
|
||||
// Whether parse() has already materialized this.documents.
Response.prototype.isParsed = function() {
  return this.parsed;
};
|
||||
|
||||
// Decode the reply's BSON documents into this.documents (idempotent).
//
// @param {object} [options] raw (return Buffers instead of objects),
//   documentsReturnedIn (field to keep raw when re-deserializing a single
//   raw document), and promoteLongs/promoteValues/promoteBuffers overrides.
Response.prototype.parse = function(options) {
  // Don't parse again if not needed
  if (this.parsed) return;
  options = options || {};

  // Allow the return of raw documents instead of parsing
  var raw = options.raw || false;
  var documentsReturnedIn = options.documentsReturnedIn || null;
  // Per-call overrides fall back to the defaults captured at construction.
  var promoteLongs =
    typeof options.promoteLongs === 'boolean' ? options.promoteLongs : this.opts.promoteLongs;
  var promoteValues =
    typeof options.promoteValues === 'boolean' ? options.promoteValues : this.opts.promoteValues;
  var promoteBuffers =
    typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : this.opts.promoteBuffers;
  var bsonSize, _options;

  // Set up the options
  _options = {
    promoteLongs: promoteLongs,
    promoteValues: promoteValues,
    promoteBuffers: promoteBuffers
  };

  // Position within OP_REPLY at which documents start
  // (See https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#wire-op-reply)
  this.index = 20;

  //
  // Parse Body
  //
  for (var i = 0; i < this.numberReturned; i++) {
    // Each BSON document begins with its own little-endian int32 length.
    bsonSize =
      this.data[this.index] |
      (this.data[this.index + 1] << 8) |
      (this.data[this.index + 2] << 16) |
      (this.data[this.index + 3] << 24);

    // If we have raw results specified slice the return document
    if (raw) {
      this.documents[i] = this.data.slice(this.index, this.index + bsonSize);
    } else {
      this.documents[i] = this.bson.deserialize(
        this.data.slice(this.index, this.index + bsonSize),
        _options
      );
    }

    // Adjust the index
    this.index = this.index + bsonSize;
  }

  // Single raw document + documentsReturnedIn: deserialize the wrapper but
  // keep the named field (e.g. a cursor batch) as raw BSON.
  if (this.documents.length === 1 && documentsReturnedIn != null && raw) {
    const fieldsAsRaw = {};
    fieldsAsRaw[documentsReturnedIn] = true;
    _options.fieldsAsRaw = fieldsAsRaw;

    const doc = this.bson.deserialize(this.documents[0], _options);
    this.documents = [doc];
  }

  // Set parsed
  this.parsed = true;
};
|
||||
|
||||
// Public API: legacy wire-protocol message builders and the reply parser.
module.exports = {
  Query: Query,
  GetMore: GetMore,
  Response: Response,
  KillCursor: KillCursor
};
|
||||
352
node_modules/mongodb/lib/core/connection/connect.js
generated
vendored
Normal file
352
node_modules/mongodb/lib/core/connection/connect.js
generated
vendored
Normal file
@@ -0,0 +1,352 @@
|
||||
'use strict';
|
||||
const net = require('net');
|
||||
const tls = require('tls');
|
||||
const Connection = require('./connection');
|
||||
const MongoError = require('../error').MongoError;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const MongoNetworkTimeoutError = require('../error').MongoNetworkTimeoutError;
|
||||
const defaultAuthProviders = require('../auth/defaultAuthProviders').defaultAuthProviders;
|
||||
const AuthContext = require('../auth/auth_provider').AuthContext;
|
||||
const WIRE_CONSTANTS = require('../wireprotocol/constants');
|
||||
const makeClientMetadata = require('../utils').makeClientMetadata;
|
||||
const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION;
|
||||
const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION;
|
||||
const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION;
|
||||
const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION;
|
||||
let AUTH_PROVIDERS;
|
||||
|
||||
/**
 * Open a socket to a MongoDB server, wrap it in a connection object, and run
 * the initial handshake (ismaster + optional authentication).
 *
 * @param {object} options Connect options (host, port, ssl, bson, credentials, ...)
 * @param {EventEmitter} [cancellationToken] Emits 'cancel' to abort establishment
 * @param {function} callback Invoked with (error) or (undefined, connection)
 */
function connect(options, cancellationToken, callback) {
  // Support the two-argument form `connect(options, callback)`.
  if (typeof cancellationToken === 'function') {
    callback = cancellationToken;
    cancellationToken = undefined;
  }

  const hasCustomType = Boolean(options && options.connectionType);
  const ConnectionType = hasCustomType ? options.connectionType : Connection;

  // The auth provider table is built lazily: it needs the caller's BSON parser.
  if (AUTH_PROVIDERS == null) {
    AUTH_PROVIDERS = defaultAuthProviders(options.bson);
  }

  const family = options.family === void 0 ? 0 : options.family;
  const onSocket = (err, socket) => {
    if (err) {
      // In the error case, `socket` is the originating error event name.
      callback(err, socket);
      return;
    }

    const conn = new ConnectionType(socket, options);
    performInitialHandshake(conn, options, callback);
  };

  makeConnection(family, options, cancellationToken, onSocket);
}
|
||||
|
||||
/**
 * A "modern" connection is anything that is NOT the legacy `Connection` class
 * (e.g. a custom type supplied via `options.connectionType`).
 */
function isModernConnectionType(conn) {
  const isLegacy = conn instanceof Connection;
  return isLegacy === false;
}
|
||||
|
||||
/**
 * Verify that the server's advertised wire-version range overlaps the range
 * supported by this driver.
 *
 * @param {object} ismaster The server's handshake (ismaster) response
 * @param {object} options Provides `host` and `port` for error messages
 * @returns {MongoError|null} `null` when compatible, otherwise a descriptive error
 */
function checkSupportedServer(ismaster, options) {
  const highEnough =
    ismaster &&
    typeof ismaster.maxWireVersion === 'number' &&
    ismaster.maxWireVersion >= MIN_SUPPORTED_WIRE_VERSION;

  if (!highEnough) {
    // Server is too old for this driver.
    const message = `Server at ${options.host}:${
      options.port
    } reports maximum wire version ${ismaster.maxWireVersion ||
      0}, but this version of the Node.js Driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION})`;
    return new MongoError(message);
  }

  const lowEnough =
    ismaster &&
    typeof ismaster.minWireVersion === 'number' &&
    ismaster.minWireVersion <= MAX_SUPPORTED_WIRE_VERSION;

  if (lowEnough) {
    return null;
  }

  // Server is too new for this driver.
  const message = `Server at ${options.host}:${options.port} reports minimum wire version ${ismaster.minWireVersion}, but this version of the Node.js Driver requires at most ${MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${MAX_SUPPORTED_SERVER_VERSION})`;
  return new MongoError(message);
}
|
||||
|
||||
/**
 * Run the initial `ismaster` handshake on a freshly created connection, then
 * (optionally) authenticate. On any failure the connection is destroyed
 * before the error is surfaced to the caller.
 *
 * @param {Connection} conn The newly created connection to handshake on
 * @param {object} options The connect options (credentials, timeouts, compression)
 * @param {function} _callback Invoked with (error) or (undefined, conn)
 */
function performInitialHandshake(conn, options, _callback) {
  // Wrap the callback so any handshake failure also tears down the socket.
  const callback = function(err, ret) {
    if (err && conn) {
      conn.destroy();
    }
    _callback(err, ret);
  };

  const credentials = options.credentials;
  if (credentials) {
    // Reject unknown auth mechanisms up-front; DEFAULT is resolved later
    // from the server's handshake response.
    if (!credentials.mechanism.match(/DEFAULT/i) && !AUTH_PROVIDERS[credentials.mechanism]) {
      callback(new MongoError(`authMechanism '${credentials.mechanism}' not supported`));
      return;
    }
  }

  // Shared state (connection, credentials, handshake response) for the auth flow.
  const authContext = new AuthContext(conn, credentials, options);
  prepareHandshakeDocument(authContext, (err, handshakeDoc) => {
    if (err) {
      return callback(err);
    }

    const handshakeOptions = Object.assign({}, options);
    if (options.connectTimeoutMS || options.connectionTimeout) {
      // The handshake technically is a monitoring check, so its socket timeout should be connectTimeoutMS
      handshakeOptions.socketTimeout = options.connectTimeoutMS || options.connectionTimeout;
    }

    // Measure the handshake round-trip time (stored as lastIsMasterMS below).
    const start = new Date().getTime();
    conn.command('admin.$cmd', handshakeDoc, handshakeOptions, (err, result) => {
      if (err) {
        callback(err);
        return;
      }

      const response = result.result;
      if (response.ok === 0) {
        callback(new MongoError(response));
        return;
      }

      // Fail fast when the server's wire-version range does not overlap ours.
      const supportedServerErr = checkSupportedServer(response, options);
      if (supportedServerErr) {
        callback(supportedServerErr);
        return;
      }

      // Legacy connection types negotiate compression here; custom/modern
      // connection types are expected to handle this themselves.
      if (!isModernConnectionType(conn)) {
        // resolve compression
        if (response.compression) {
          // Pick the first compressor (in our preference order) that the
          // server also supports.
          const agreedCompressors = handshakeDoc.compression.filter(
            compressor => response.compression.indexOf(compressor) !== -1
          );

          if (agreedCompressors.length) {
            conn.agreedCompressor = agreedCompressors[0];
          }

          if (options.compression && options.compression.zlibCompressionLevel) {
            conn.zlibCompressionLevel = options.compression.zlibCompressionLevel;
          }
        }
      }

      // NOTE: This is metadata attached to the connection while porting away from
      // handshake being done in the `Server` class. Likely, it should be
      // relocated, or at very least restructured.
      conn.ismaster = response;
      conn.lastIsMasterMS = new Date().getTime() - start;

      // Skip authentication when the server reports arbiterOnly.
      if (!response.arbiterOnly && credentials) {
        // store the response on auth context
        Object.assign(authContext, { response });

        // DEFAULT is resolved to a concrete mechanism using the server response.
        const resolvedCredentials = credentials.resolveAuthMechanism(response);
        const authProvider = AUTH_PROVIDERS[resolvedCredentials.mechanism];
        authProvider.auth(authContext, err => {
          if (err) return callback(err);
          callback(undefined, conn);
        });

        return;
      }

      callback(undefined, conn);
    });
  });
}
|
||||
|
||||
/**
 * Build the `ismaster` handshake command document, delegating to the
 * relevant auth provider so it can decorate the document.
 *
 * @param {AuthContext} authContext Holds the connect options and credentials
 * @param {function} callback Invoked with (error) or (undefined, handshakeDoc)
 */
function prepareHandshakeDocument(authContext, callback) {
  const options = authContext.options;
  const credentials = authContext.credentials;

  let compressors = [];
  if (options.compression && options.compression.compressors) {
    compressors = options.compression.compressors;
  }

  const handshakeDoc = {
    ismaster: true,
    client: options.metadata || makeClientMetadata(options),
    compression: compressors
  };

  // Without credentials the plain document is all we need.
  if (!credentials) {
    callback(undefined, handshakeDoc);
    return;
  }

  // The DEFAULT mechanism asks the server which SASL mechanisms it supports
  // for this user; the SCRAM-SHA-256 provider prepares that request.
  if (credentials.mechanism.match(/DEFAULT/i) && credentials.username) {
    handshakeDoc.saslSupportedMechs = `${credentials.source}.${credentials.username}`;
    AUTH_PROVIDERS['scram-sha-256'].prepare(handshakeDoc, authContext, callback);
    return;
  }

  AUTH_PROVIDERS[credentials.mechanism].prepare(handshakeDoc, authContext, callback);
}
|
||||
|
||||
// TLS option names that are allowed to flow from the user-supplied connect
// options straight through to `tls.connect` (used by parseSslOptions).
const LEGAL_SSL_SOCKET_OPTIONS = [
  'pfx',
  'key',
  'passphrase',
  'cert',
  'ca',
  'ciphers',
  'NPNProtocols',
  'ALPNProtocols',
  'servername',
  'ecdhCurve',
  'secureProtocol',
  'secureContext',
  'session',
  'minDHSize',
  'crl',
  'rejectUnauthorized'
];
|
||||
|
||||
/**
 * Translate driver connect options into the option object accepted by
 * `net.createConnection`. A host containing `/` is treated as a UNIX domain
 * socket path.
 *
 * @param {number} family IP stack selector (0 = both, 4 = IPv4, 6 = IPv6)
 * @param {object} options Driver options; reads `host` and `port`
 * @returns {object} `{ path }` or `{ family, host, port, rejectUnauthorized }`
 */
function parseConnectOptions(family, options) {
  let host = 'localhost';
  if (typeof options.host === 'string') {
    host = options.host;
  }

  // UNIX domain socket path
  if (host.includes('/')) {
    return { path: host };
  }

  let port = 27017;
  if (typeof options.port === 'number') {
    port = options.port;
  }

  return { family, host, port, rejectUnauthorized: false };
}
|
||||
|
||||
/**
 * Build the option object for `tls.connect`: the plain TCP options plus any
 * whitelisted TLS options the caller provided.
 *
 * @param {number} family IP stack selector (0 = both, 4 = IPv4, 6 = IPv6)
 * @param {object} options Driver options
 * @returns {object} Options suitable for `tls.connect`
 */
function parseSslOptions(family, options) {
  const result = parseConnectOptions(family, options);

  // Copy across only the TLS options we explicitly allow.
  for (const name in options) {
    const isLegal = LEGAL_SSL_SOCKET_OPTIONS.indexOf(name) !== -1;
    if (isLegal && options[name] != null) {
      result[name] = options[name];
    }
  }

  if (options.checkServerIdentity === false) {
    // Skip the identity check by returning undefined, as per the Node docs:
    // https://nodejs.org/api/tls.html#tls_tls_connect_options_callback
    result.checkServerIdentity = function() {
      return undefined;
    };
  } else if (typeof options.checkServerIdentity === 'function') {
    result.checkServerIdentity = options.checkServerIdentity;
  }

  // Default the SNI servername to the host we are dialing.
  if (result.servername == null) {
    result.servername = result.host;
  }

  return result;
}
|
||||
|
||||
// Socket events that signal failure while establishing a connection.
const SOCKET_ERROR_EVENTS = new Set(['error', 'close', 'timeout', 'parseError']);

/**
 * Create the raw TCP or TLS socket for a connection attempt, wiring up
 * one-shot failure handlers and optional cancellation.
 *
 * @param {number} family IP stack selector (0 = both, 4 = IPv4, 6 = IPv6)
 * @param {object} options Connect options (ssl, timeouts, keepAlive, ...)
 * @param {EventEmitter} [cancellationToken] Emits 'cancel' to abort the attempt
 * @param {function} _callback Invoked with (error) or (null, socket)
 */
function makeConnection(family, options, cancellationToken, _callback) {
  const useSsl = typeof options.ssl === 'boolean' ? options.ssl : false;
  const keepAlive = typeof options.keepAlive === 'boolean' ? options.keepAlive : true;
  let keepAliveInitialDelay =
    typeof options.keepAliveInitialDelay === 'number' ? options.keepAliveInitialDelay : 120000;
  const noDelay = typeof options.noDelay === 'boolean' ? options.noDelay : true;
  // `connectionTimeout` is the legacy name, `connectTimeoutMS` the URI-style one.
  const connectionTimeout =
    typeof options.connectionTimeout === 'number'
      ? options.connectionTimeout
      : typeof options.connectTimeoutMS === 'number'
      ? options.connectTimeoutMS
      : 30000;
  const socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000;
  const rejectUnauthorized =
    typeof options.rejectUnauthorized === 'boolean' ? options.rejectUnauthorized : true;

  // Keep-alive probes must start before the socket would time out.
  if (keepAliveInitialDelay > socketTimeout) {
    keepAliveInitialDelay = Math.round(socketTimeout / 2);
  }

  let socket;
  // On any failure, destroy the socket before reporting the error.
  const callback = function(err, ret) {
    if (err && socket) {
      socket.destroy();
    }

    _callback(err, ret);
  };

  try {
    if (useSsl) {
      socket = tls.connect(parseSslOptions(family, options));
      // Not available on all Node versions — guard before calling.
      if (typeof socket.disableRenegotiation === 'function') {
        socket.disableRenegotiation();
      }
    } else {
      socket = net.createConnection(parseConnectOptions(family, options));
    }
  } catch (err) {
    return callback(err);
  }

  socket.setKeepAlive(keepAlive, keepAliveInitialDelay);
  // Establishment uses the (shorter) connection timeout; the operational
  // socket timeout is applied once connected, in connectHandler below.
  socket.setTimeout(connectionTimeout);
  socket.setNoDelay(noDelay);

  // TLS sockets are only usable after the TLS handshake completes.
  const connectEvent = useSsl ? 'secureConnect' : 'connect';
  let cancellationHandler;
  // Builds a one-shot failure handler that removes every other listener
  // before reporting, so the callback cannot fire twice.
  function errorHandler(eventName) {
    return err => {
      SOCKET_ERROR_EVENTS.forEach(event => socket.removeAllListeners(event));
      if (cancellationHandler) {
        cancellationToken.removeListener('cancel', cancellationHandler);
      }

      socket.removeListener(connectEvent, connectHandler);
      callback(connectionFailureError(eventName, err));
    };
  }

  function connectHandler() {
    // Success: detach all failure/cancellation listeners first.
    SOCKET_ERROR_EVENTS.forEach(event => socket.removeAllListeners(event));
    if (cancellationHandler) {
      cancellationToken.removeListener('cancel', cancellationHandler);
    }

    // For TLS, surface certificate validation failures unless disabled.
    if (socket.authorizationError && rejectUnauthorized) {
      return callback(socket.authorizationError);
    }

    // Switch from the connect timeout to the operational socket timeout.
    socket.setTimeout(socketTimeout);
    callback(null, socket);
  }

  SOCKET_ERROR_EVENTS.forEach(event => socket.once(event, errorHandler(event)));
  if (cancellationToken) {
    cancellationHandler = errorHandler('cancel');
    cancellationToken.once('cancel', cancellationHandler);
  }

  socket.once(connectEvent, connectHandler);
}
|
||||
|
||||
/**
 * Map a socket-level failure event name to the appropriate Mongo error type.
 *
 * @param {string} type Originating event name ('error'|'timeout'|'close'|'cancel')
 * @param {Error} [err] Underlying error, used only for the 'error' case
 * @returns {MongoNetworkError|MongoNetworkTimeoutError}
 */
function connectionFailureError(type, err) {
  if (type === 'error') {
    return new MongoNetworkError(err);
  }
  if (type === 'timeout') {
    return new MongoNetworkTimeoutError(`connection timed out`);
  }
  if (type === 'close') {
    return new MongoNetworkError(`connection closed`);
  }
  if (type === 'cancel') {
    return new MongoNetworkError(`connection establishment was cancelled`);
  }
  return new MongoNetworkError(`unknown network error`);
}

module.exports = connect;
|
||||
712
node_modules/mongodb/lib/core/connection/connection.js
generated
vendored
Normal file
712
node_modules/mongodb/lib/core/connection/connection.js
generated
vendored
Normal file
@@ -0,0 +1,712 @@
|
||||
'use strict';
|
||||
|
||||
const EventEmitter = require('events').EventEmitter;
|
||||
const crypto = require('crypto');
|
||||
const debugOptions = require('./utils').debugOptions;
|
||||
const parseHeader = require('../wireprotocol/shared').parseHeader;
|
||||
const decompress = require('../wireprotocol/compression').decompress;
|
||||
const Response = require('./commands').Response;
|
||||
const BinMsg = require('./msg').BinMsg;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const MongoNetworkTimeoutError = require('../error').MongoNetworkTimeoutError;
|
||||
const MongoError = require('../error').MongoError;
|
||||
const Logger = require('./logger');
|
||||
const OP_COMPRESSED = require('../wireprotocol/shared').opcodes.OP_COMPRESSED;
|
||||
const OP_MSG = require('../wireprotocol/shared').opcodes.OP_MSG;
|
||||
const MESSAGE_HEADER_SIZE = require('../wireprotocol/shared').MESSAGE_HEADER_SIZE;
|
||||
const Buffer = require('safe-buffer').Buffer;
|
||||
const Query = require('./commands').Query;
|
||||
const CommandResult = require('./command_result');
|
||||
|
||||
// Monotonically increasing id assigned to each new Connection instance.
let _id = 0;

// 64 MiB ceiling for a single wire-protocol message (legacy safety limit).
const DEFAULT_MAX_BSON_MESSAGE_SIZE = 1024 * 1024 * 16 * 4;
// Option names included when debug-logging a connection's configuration.
const DEBUG_FIELDS = [
  'host',
  'port',
  'size',
  'keepAlive',
  'keepAliveInitialDelay',
  'noDelay',
  'connectionTimeout',
  'socketTimeout',
  'ssl',
  'ca',
  'crl',
  'cert',
  'rejectUnauthorized',
  'promoteLongs',
  'promoteValues',
  'promoteBuffers',
  'checkServerIdentity'
];

// Optional test hook notified as connections are added/removed.
let connectionAccountingSpy = undefined;
// When true, live connections are tracked in `connections` (test support).
let connectionAccounting = false;
let connections = {};
|
||||
|
||||
/**
|
||||
* A class representing a single connection to a MongoDB server
|
||||
*
|
||||
* @fires Connection#connect
|
||||
* @fires Connection#close
|
||||
* @fires Connection#error
|
||||
* @fires Connection#timeout
|
||||
* @fires Connection#parseError
|
||||
* @fires Connection#message
|
||||
*/
|
||||
class Connection extends EventEmitter {
  /**
   * Creates a new Connection instance
   *
   * **NOTE**: Internal class, do not instantiate directly
   *
   * @param {Socket} socket The socket this connection wraps
   * @param {Object} options Various settings
   * @param {object} options.bson An implementation of bson serialize and deserialize
   * @param {string} [options.host='localhost'] The host the socket is connected to
   * @param {number} [options.port=27017] The port used for the socket connection
   * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
   * @param {number} [options.keepAliveInitialDelay=120000] Initial delay before TCP keep alive enabled
   * @param {number} [options.connectionTimeout=30000] TCP Connection timeout setting
   * @param {number} [options.socketTimeout=360000] TCP Socket timeout setting
   * @param {boolean} [options.promoteLongs] Convert Long values from the db into Numbers if they fit into 53 bits
   * @param {boolean} [options.promoteValues] Promotes BSON values to native types where possible, set to false to only receive wrapper types.
   * @param {boolean} [options.promoteBuffers] Promotes Binary BSON values to native Node Buffers.
   * @param {number} [options.maxBsonMessageSize=0x4000000] Largest possible size of a BSON message (for legacy purposes)
   * @throws {TypeError} When `options.bson` is not provided
   */
  constructor(socket, options) {
    super();

    options = options || {};
    if (!options.bson) {
      throw new TypeError('must pass in valid bson parser');
    }

    // Module-level counter gives every connection a unique id for logging/accounting.
    this.id = _id++;
    this.options = options;
    this.logger = Logger('Connection', options);
    this.bson = options.bson;
    this.tag = options.tag;
    this.maxBsonMessageSize = options.maxBsonMessageSize || DEFAULT_MAX_BSON_MESSAGE_SIZE;

    this.port = options.port || 27017;
    this.host = options.host || 'localhost';
    this.socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000;

    // These values are inspected directly in tests, but maybe not necessary to keep around
    this.keepAlive = typeof options.keepAlive === 'boolean' ? options.keepAlive : true;
    this.keepAliveInitialDelay =
      typeof options.keepAliveInitialDelay === 'number' ? options.keepAliveInitialDelay : 120000;
    this.connectionTimeout =
      typeof options.connectionTimeout === 'number' ? options.connectionTimeout : 30000;
    // Keep-alive probes must start before the socket would time out.
    if (this.keepAliveInitialDelay > this.socketTimeout) {
      this.keepAliveInitialDelay = Math.round(this.socketTimeout / 2);
    }

    // Debug information
    if (this.logger.isDebug()) {
      this.logger.debug(
        `creating connection ${this.id} with options [${JSON.stringify(
          debugOptions(DEBUG_FIELDS, options)
        )}]`
      );
    }

    // Response options — BSON deserialization behavior for incoming messages.
    this.responseOptions = {
      promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true,
      promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true,
      promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false
    };

    // Flushing
    this.flushing = false;
    this.queue = [];

    // Internal state
    this.writeStream = null;
    this.destroyed = false;
    this.timedOut = false;

    // Stable identifier derived from "host:port" — a SHA-1 hex digest.
    const hash = crypto.createHash('sha1');
    hash.update(this.address);
    this.hashedName = hash.digest('hex');

    // All operations in flight on the connection
    this.workItems = [];

    // Wire up the socket: failure events are one-shot, 'data' feeds the
    // incremental wire-protocol parser (dataHandler).
    this.socket = socket;
    this.socket.once('error', errorHandler(this));
    this.socket.once('timeout', timeoutHandler(this));
    this.socket.once('close', closeHandler(this));
    this.socket.on('data', dataHandler(this));

    if (connectionAccounting) {
      addConnection(this.id, this);
    }
  }

  // Set the socket's inactivity timeout (no-op if the socket is gone).
  setSocketTimeout(value) {
    if (this.socket) {
      this.socket.setTimeout(value);
    }
  }

  // Restore the socket timeout to the connection's configured default.
  resetSocketTimeout() {
    if (this.socket) {
      this.socket.setTimeout(this.socketTimeout);
    }
  }

  // Enable module-wide connection tracking (test support); resets the registry.
  static enableConnectionAccounting(spy) {
    if (spy) {
      connectionAccountingSpy = spy;
    }

    connectionAccounting = true;
    connections = {};
  }

  static disableConnectionAccounting() {
    connectionAccounting = false;
    connectionAccountingSpy = undefined;
  }

  // Snapshot of the tracked connections registry (test support).
  static connections() {
    return connections;
  }

  // "host:port" string identifying the remote end.
  get address() {
    return `${this.host}:${this.port}`;
  }

  /**
   * Unref this connection
   * @method
   * @return {boolean}
   */
  unref() {
    // If no socket yet, defer the unref until the socket connects.
    if (this.socket == null) {
      this.once('connect', () => this.socket.unref());
      return;
    }

    this.socket.unref();
  }

  /**
   * Flush all work Items on this connection
   *
   * @param {*} err The error to propagate to the flushed work items
   */
  flush(err) {
    while (this.workItems.length > 0) {
      const workItem = this.workItems.shift();
      if (workItem.cb) {
        workItem.cb(err);
      }
    }
  }

  /**
   * Destroy connection
   * @method
   * @param {object} [options] `{ force: boolean }` — force destroys the socket
   *   immediately instead of ending it gracefully
   * @param {function} [callback] Invoked once the socket is closed
   */
  destroy(options, callback) {
    if (typeof options === 'function') {
      callback = options;
      options = {};
    }

    options = Object.assign({ force: false }, options);

    if (connectionAccounting) {
      deleteConnection(this.id);
    }

    // NOTE(review): when the socket is already gone, `callback` is never
    // invoked on this path — confirm callers tolerate that.
    if (this.socket == null) {
      this.destroyed = true;
      return;
    }

    // A timed-out socket is treated like a forced destroy.
    if (options.force || this.timedOut) {
      this.socket.destroy();
      this.destroyed = true;
      if (typeof callback === 'function') callback(null, null);
      return;
    }

    // NOTE(review): net.Socket#end does not pass an error to its callback,
    // so `err` here is presumably always undefined — confirm.
    this.socket.end(err => {
      this.destroyed = true;
      if (typeof callback === 'function') callback(err, null);
    });
  }

  /**
   * Write to connection
   * @method
   * @param {Command} command Command to write out need to implement toBin and toBinUnified
   * @return {boolean} false when the socket was already destroyed
   */
  write(buffer) {
    // Debug Log
    if (this.logger.isDebug()) {
      if (!Array.isArray(buffer)) {
        this.logger.debug(`writing buffer [${buffer.toString('hex')}] to ${this.address}`);
      } else {
        for (let i = 0; i < buffer.length; i++)
          this.logger.debug(`writing buffer [${buffer[i].toString('hex')}] to ${this.address}`);
      }
    }

    // Double check that the connection is not destroyed
    if (this.socket.destroyed === false) {
      // Write out the command
      if (!Array.isArray(buffer)) {
        this.socket.write(buffer, 'binary');
        return true;
      }

      // Iterate over all buffers and write them in order to the socket
      for (let i = 0; i < buffer.length; i++) {
        this.socket.write(buffer[i], 'binary');
      }

      return true;
    }

    // Connection is destroyed return write failed
    return false;
  }

  /**
   * Return id of connection as a string
   * @method
   * @return {string}
   */
  toString() {
    return '' + this.id;
  }

  /**
   * Return json object of connection
   * @method
   * @return {object}
   */
  toJSON() {
    return { id: this.id, host: this.host, port: this.port };
  }

  /**
   * Is the connection connected
   * @method
   * @return {boolean}
   */
  isConnected() {
    if (this.destroyed) return false;
    return !this.socket.destroyed && this.socket.writable;
  }

  /**
   * Run a single command on this connection and wait for its reply.
   *
   * @param {string} ns Fully qualified namespace, e.g. 'admin.$cmd'
   * @param {object} command The command document
   * @param {object} [options] `socketTimeout` override
   * @param {function} callback Invoked with (error) or (undefined, CommandResult)
   * @ignore
   */
  command(ns, command, options, callback) {
    if (typeof options === 'function') (callback = options), (options = {});

    const conn = this;
    const socketTimeout =
      typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000;
    const bson = conn.options.bson;
    const query = new Query(bson, ns, command, {
      numberToSkip: 0,
      numberToReturn: 1
    });

    // Guard: ensure the user callback fires at most once.
    const noop = () => {};
    function _callback(err, result) {
      callback(err, result);
      callback = noop;
    }

    // Any connection-level failure aborts the command; listeners are removed
    // so neither handler can fire again.
    function errorHandler(err) {
      conn.resetSocketTimeout();
      CONNECTION_ERROR_EVENTS.forEach(eventName => conn.removeListener(eventName, errorHandler));
      conn.removeListener('message', messageHandler);

      if (err == null) {
        err = new MongoError(`runCommand failed for connection to '${conn.address}'`);
      }

      // ignore all future errors
      conn.on('error', noop);
      _callback(err);
    }

    function messageHandler(msg) {
      // Only handle the reply addressed to our request.
      if (msg.responseTo !== query.requestId) {
        return;
      }

      conn.resetSocketTimeout();
      CONNECTION_ERROR_EVENTS.forEach(eventName => conn.removeListener(eventName, errorHandler));
      conn.removeListener('message', messageHandler);

      msg.parse({ promoteValues: true });

      const response = msg.documents[0];
      if (response.ok === 0 || response.$err || response.errmsg || response.code) {
        _callback(new MongoError(response));
        return;
      }

      // `this` here is the emitting connection (EventEmitter invokes
      // listeners with the emitter as the receiver).
      _callback(undefined, new CommandResult(response, this, msg));
    }

    conn.setSocketTimeout(socketTimeout);
    CONNECTION_ERROR_EVENTS.forEach(eventName => conn.once(eventName, errorHandler));
    conn.on('message', messageHandler);
    conn.write(query.toBin());
  }
}
|
||||
|
||||
// Connection-level events that abort an in-flight `command` call.
const CONNECTION_ERROR_EVENTS = ['error', 'close', 'timeout', 'parseError'];

/** Stop tracking a connection id and notify the accounting spy, if any. */
function deleteConnection(id) {
  delete connections[id];

  if (connectionAccountingSpy) {
    connectionAccountingSpy.deleteConnection(id);
  }
}
|
||||
|
||||
/** Track a live connection by id and notify the accounting spy, if any. */
function addConnection(id, connection) {
  connections[id] = connection;

  if (connectionAccountingSpy) {
    connectionAccountingSpy.addConnection(id, connection);
  }
}
|
||||
|
||||
//
|
||||
// Connection handlers
|
||||
/**
 * Build the one-shot socket 'error' listener for a connection. Re-emits the
 * failure as a MongoNetworkError on the connection itself.
 */
function errorHandler(conn) {
  return err => {
    if (connectionAccounting) {
      deleteConnection(conn.id);
    }

    if (conn.logger.isDebug()) {
      const detail = `connection ${conn.id} for [${conn.address}] errored out with [${JSON.stringify(
        err
      )}]`;
      conn.logger.debug(detail);
    }

    conn.emit('error', new MongoNetworkError(err), conn);
  };
}
|
||||
|
||||
/**
 * Build the one-shot socket 'timeout' listener for a connection. Marks the
 * connection as timed out and re-emits a MongoNetworkTimeoutError.
 */
function timeoutHandler(conn) {
  return () => {
    if (connectionAccounting) {
      deleteConnection(conn.id);
    }

    if (conn.logger.isDebug()) {
      conn.logger.debug(`connection ${conn.id} for [${conn.address}] timed out`);
    }

    conn.timedOut = true;
    // A missing ismaster means the handshake never completed before the timeout.
    const timeoutError = new MongoNetworkTimeoutError(
      `connection ${conn.id} to ${conn.address} timed out`,
      { beforeHandshake: conn.ismaster == null }
    );
    conn.emit('timeout', timeoutError, conn);
  };
}
|
||||
|
||||
/**
 * Build the one-shot socket 'close' listener for a connection. Emits 'close'
 * only for clean closures — errored closures already produced an 'error'.
 */
function closeHandler(conn) {
  return hadError => {
    if (connectionAccounting) {
      deleteConnection(conn.id);
    }

    if (conn.logger.isDebug()) {
      conn.logger.debug(`connection ${conn.id} with for [${conn.address}] closed`);
    }

    if (hadError) {
      return;
    }

    const closeError = new MongoNetworkError(`connection ${conn.id} to ${conn.address} closed`);
    conn.emit('close', closeError, conn);
  };
}
|
||||
|
||||
// Handle a message once it is received
|
||||
/**
 * Emit a parsed server message on the connection, wrapping the raw bytes in
 * the constructor matching its opcode (OP_MSG -> BinMsg, otherwise Response).
 *
 * @param {Connection} conn The connection the message arrived on
 * @param {object} msgHeader Parsed wire-protocol message header
 * @param {Buffer} message The full raw message (header included)
 * @param {Buffer} messageBody The (possibly decompressed) message body
 */
function emitWireMessage(conn, msgHeader, message, messageBody) {
  const ResponseConstructor = msgHeader.opCode === OP_MSG ? BinMsg : Response;
  conn.emit(
    'message',
    new ResponseConstructor(conn.bson, message, msgHeader, messageBody, conn.responseOptions),
    conn
  );
}

/**
 * Handle one complete wire-protocol message: decompress it if needed, then
 * emit it as a 'message' event on the connection.
 *
 * @param {Connection} conn The connection the message arrived on
 * @param {Buffer} message Complete raw message, starting at the header
 */
function processMessage(conn, message) {
  const msgHeader = parseHeader(message);
  if (msgHeader.opCode !== OP_COMPRESSED) {
    emitWireMessage(conn, msgHeader, message, message.slice(MESSAGE_HEADER_SIZE));
    return;
  }

  // OP_COMPRESSED wraps another message: read the original opcode, the
  // uncompressed length, and the compressor id, then inflate the remainder.
  msgHeader.fromCompressed = true;
  let index = MESSAGE_HEADER_SIZE;
  msgHeader.opCode = message.readInt32LE(index);
  index += 4;
  msgHeader.length = message.readInt32LE(index);
  index += 4;
  const compressorID = message[index];
  index++;

  decompress(compressorID, message.slice(index), (err, decompressedMsgBody) => {
    if (err) {
      conn.emit('error', err);
      return;
    }

    // A length mismatch means the payload did not survive decompression intact.
    if (decompressedMsgBody.length !== msgHeader.length) {
      conn.emit(
        'error',
        new MongoError(
          'Decompressing a compressed message from the server failed. The message is corrupt.'
        )
      );

      return;
    }

    emitWireMessage(conn, msgHeader, message, decompressedMsgBody);
  });
}
|
||||
|
||||
function dataHandler(conn) {
|
||||
return function(data) {
|
||||
// Parse until we are done with the data
|
||||
while (data.length > 0) {
|
||||
// If we still have bytes to read on the current message
|
||||
if (conn.bytesRead > 0 && conn.sizeOfMessage > 0) {
|
||||
// Calculate the amount of remaining bytes
|
||||
const remainingBytesToRead = conn.sizeOfMessage - conn.bytesRead;
|
||||
// Check if the current chunk contains the rest of the message
|
||||
if (remainingBytesToRead > data.length) {
|
||||
// Copy the new data into the exiting buffer (should have been allocated when we know the message size)
|
||||
data.copy(conn.buffer, conn.bytesRead);
|
||||
// Adjust the number of bytes read so it point to the correct index in the buffer
|
||||
conn.bytesRead = conn.bytesRead + data.length;
|
||||
|
||||
// Reset state of buffer
|
||||
data = Buffer.alloc(0);
|
||||
} else {
|
||||
// Copy the missing part of the data into our current buffer
|
||||
data.copy(conn.buffer, conn.bytesRead, 0, remainingBytesToRead);
|
||||
// Slice the overflow into a new buffer that we will then re-parse
|
||||
data = data.slice(remainingBytesToRead);
|
||||
|
||||
// Emit current complete message
|
||||
const emitBuffer = conn.buffer;
|
||||
// Reset state of buffer
|
||||
conn.buffer = null;
|
||||
conn.sizeOfMessage = 0;
|
||||
conn.bytesRead = 0;
|
||||
conn.stubBuffer = null;
|
||||
|
||||
processMessage(conn, emitBuffer);
|
||||
}
|
||||
} else {
|
||||
// Stub buffer is kept in case we don't get enough bytes to determine the
|
||||
// size of the message (< 4 bytes)
|
||||
if (conn.stubBuffer != null && conn.stubBuffer.length > 0) {
|
||||
// If we have enough bytes to determine the message size let's do it
|
||||
if (conn.stubBuffer.length + data.length > 4) {
|
||||
// Prepad the data
|
||||
const newData = Buffer.alloc(conn.stubBuffer.length + data.length);
|
||||
conn.stubBuffer.copy(newData, 0);
|
||||
data.copy(newData, conn.stubBuffer.length);
|
||||
// Reassign for parsing
|
||||
data = newData;
|
||||
|
||||
// Reset state of buffer
|
||||
conn.buffer = null;
|
||||
conn.sizeOfMessage = 0;
|
||||
conn.bytesRead = 0;
|
||||
conn.stubBuffer = null;
|
||||
} else {
|
||||
// Add the the bytes to the stub buffer
|
||||
const newStubBuffer = Buffer.alloc(conn.stubBuffer.length + data.length);
|
||||
// Copy existing stub buffer
|
||||
conn.stubBuffer.copy(newStubBuffer, 0);
|
||||
// Copy missing part of the data
|
||||
data.copy(newStubBuffer, conn.stubBuffer.length);
|
||||
// Exit parsing loop
|
||||
data = Buffer.alloc(0);
|
||||
}
|
||||
} else {
|
||||
if (data.length > 4) {
|
||||
// Retrieve the message size
|
||||
const sizeOfMessage = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
|
||||
// If we have a negative sizeOfMessage emit error and return
|
||||
if (sizeOfMessage < 0 || sizeOfMessage > conn.maxBsonMessageSize) {
|
||||
const errorObject = {
|
||||
err: 'socketHandler',
|
||||
trace: '',
|
||||
bin: conn.buffer,
|
||||
parseState: {
|
||||
sizeOfMessage: sizeOfMessage,
|
||||
bytesRead: conn.bytesRead,
|
||||
stubBuffer: conn.stubBuffer
|
||||
}
|
||||
};
|
||||
// We got a parse Error fire it off then keep going
|
||||
conn.emit('parseError', errorObject, conn);
|
||||
return;
|
||||
}
|
||||
|
||||
// Ensure that the size of message is larger than 0 and less than the max allowed
|
||||
if (
|
||||
sizeOfMessage > 4 &&
|
||||
sizeOfMessage < conn.maxBsonMessageSize &&
|
||||
sizeOfMessage > data.length
|
||||
) {
|
||||
conn.buffer = Buffer.alloc(sizeOfMessage);
|
||||
// Copy all the data into the buffer
|
||||
data.copy(conn.buffer, 0);
|
||||
// Update bytes read
|
||||
conn.bytesRead = data.length;
|
||||
// Update sizeOfMessage
|
||||
conn.sizeOfMessage = sizeOfMessage;
|
||||
// Ensure stub buffer is null
|
||||
conn.stubBuffer = null;
|
||||
// Exit parsing loop
|
||||
data = Buffer.alloc(0);
|
||||
} else if (
|
||||
sizeOfMessage > 4 &&
|
||||
sizeOfMessage < conn.maxBsonMessageSize &&
|
||||
sizeOfMessage === data.length
|
||||
) {
|
||||
const emitBuffer = data;
|
||||
// Reset state of buffer
|
||||
conn.buffer = null;
|
||||
conn.sizeOfMessage = 0;
|
||||
conn.bytesRead = 0;
|
||||
conn.stubBuffer = null;
|
||||
// Exit parsing loop
|
||||
data = Buffer.alloc(0);
|
||||
// Emit the message
|
||||
processMessage(conn, emitBuffer);
|
||||
} else if (sizeOfMessage <= 4 || sizeOfMessage > conn.maxBsonMessageSize) {
|
||||
const errorObject = {
|
||||
err: 'socketHandler',
|
||||
trace: null,
|
||||
bin: data,
|
||||
parseState: {
|
||||
sizeOfMessage: sizeOfMessage,
|
||||
bytesRead: 0,
|
||||
buffer: null,
|
||||
stubBuffer: null
|
||||
}
|
||||
};
|
||||
// We got a parse Error fire it off then keep going
|
||||
conn.emit('parseError', errorObject, conn);
|
||||
|
||||
// Clear out the state of the parser
|
||||
conn.buffer = null;
|
||||
conn.sizeOfMessage = 0;
|
||||
conn.bytesRead = 0;
|
||||
conn.stubBuffer = null;
|
||||
// Exit parsing loop
|
||||
data = Buffer.alloc(0);
|
||||
} else {
|
||||
const emitBuffer = data.slice(0, sizeOfMessage);
|
||||
// Reset state of buffer
|
||||
conn.buffer = null;
|
||||
conn.sizeOfMessage = 0;
|
||||
conn.bytesRead = 0;
|
||||
conn.stubBuffer = null;
|
||||
// Copy rest of message
|
||||
data = data.slice(sizeOfMessage);
|
||||
// Emit the message
|
||||
processMessage(conn, emitBuffer);
|
||||
}
|
||||
} else {
|
||||
// Create a buffer that contains the space for the non-complete message
|
||||
conn.stubBuffer = Buffer.alloc(data.length);
|
||||
// Copy the data to the stub buffer
|
||||
data.copy(conn.stubBuffer, 0);
|
||||
// Exit parsing loop
|
||||
data = Buffer.alloc(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* A server connect event, used to verify that the connection is up and running
|
||||
*
|
||||
* @event Connection#connect
|
||||
* @type {Connection}
|
||||
*/
|
||||
|
||||
/**
|
||||
* The server connection closed, all pool connections closed
|
||||
*
|
||||
* @event Connection#close
|
||||
* @type {Connection}
|
||||
*/
|
||||
|
||||
/**
|
||||
* The server connection caused an error, all pool connections closed
|
||||
*
|
||||
* @event Connection#error
|
||||
* @type {Connection}
|
||||
*/
|
||||
|
||||
/**
|
||||
* The server connection timed out, all pool connections closed
|
||||
*
|
||||
* @event Connection#timeout
|
||||
* @type {Connection}
|
||||
*/
|
||||
|
||||
/**
|
||||
* The driver experienced an invalid message, all pool connections closed
|
||||
*
|
||||
* @event Connection#parseError
|
||||
* @type {Connection}
|
||||
*/
|
||||
|
||||
/**
|
||||
* An event emitted each time the connection receives a parsed message from the wire
|
||||
*
|
||||
* @event Connection#message
|
||||
* @type {Connection}
|
||||
*/
|
||||
|
||||
module.exports = Connection;
|
||||
251
node_modules/mongodb/lib/core/connection/logger.js
generated
vendored
Normal file
251
node_modules/mongodb/lib/core/connection/logger.js
generated
vendored
Normal file
@@ -0,0 +1,251 @@
|
||||
'use strict';
|
||||
|
||||
var f = require('util').format,
|
||||
MongoError = require('../error').MongoError;
|
||||
|
||||
// Filters for classes
|
||||
var classFilters = {};
|
||||
var filteredClasses = {};
|
||||
var level = null;
|
||||
// Save the process id
|
||||
var pid = process.pid;
|
||||
// current logger
|
||||
var currentLogger = null;
|
||||
|
||||
/**
|
||||
* @callback Logger~loggerCallback
|
||||
* @param {string} msg message being logged
|
||||
* @param {object} state an object containing more metadata about the logging message
|
||||
*/
|
||||
|
||||
/**
 * Creates a new Logger instance. May be invoked with or without `new`.
 * @class
 * @param {string} className The Class name associated with the logging instance
 * @param {object} [options=null] Optional settings.
 * @param {Logger~loggerCallback} [options.logger=null] Custom logger function;
 * @param {string} [options.loggerLevel=error] Override default global log level.
 */
var Logger = function(className, options) {
  // Support being called without `new`.
  if (!(this instanceof Logger)) {
    return new Logger(className, options);
  }

  var opts = options || {};

  // Class name this logger instance reports under.
  this.className = className;

  // Install a custom logger function, or fall back to console.log the first
  // time any Logger is constructed.
  if (opts.logger) {
    currentLogger = opts.logger;
  } else if (currentLogger == null) {
    currentLogger = console.log;
  }

  // Override the module-global log level when requested.
  if (opts.loggerLevel) {
    level = opts.loggerLevel || 'error';
  }

  // Register this class in the implicit class registry.
  // NOTE(review): the guard reads `filteredClasses` but writes `classFilters`
  // — this mirrors the original behavior; confirm the asymmetry is intended.
  if (filteredClasses[this.className] == null) {
    classFilters[this.className] = true;
  }
};
|
||||
|
||||
/**
 * Log a message at the debug level
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.debug = function(message, object) {
  // Explicit filters (installed via Logger.filter) take precedence over the
  // implicit per-class registry populated by the constructor.
  var hasExplicitFilters = Object.keys(filteredClasses).length > 0;
  var classEnabled = hasExplicitFilters
    ? filteredClasses[this.className]
    : classFilters[this.className];

  if (!this.isDebug() || !classEnabled) return;

  var timestamp = new Date().getTime();
  var line = f('[%s-%s:%s] %s %s', 'DEBUG', this.className, pid, timestamp, message);
  var state = {
    type: 'debug',
    message: message,
    className: this.className,
    pid: pid,
    date: timestamp
  };
  if (object) state.meta = object;
  currentLogger(line, state);
};
|
||||
|
||||
// True when `className` passes the active filter configuration: explicit
// filters installed via Logger.filter() take precedence; otherwise the
// implicit per-class registry populated by the Logger constructor is used.
function classPassesFilter(className) {
  if (Object.keys(filteredClasses).length > 0) {
    return !!filteredClasses[className];
  }
  return !!classFilters[className];
}

// Format a single log line and hand it, together with a structured state
// object, to the currently installed logger function.
function emitLog(type, label, className, message, object) {
  var dateTime = new Date().getTime();
  var msg = f('[%s-%s:%s] %s %s', label, className, pid, dateTime, message);
  var state = {
    type: type,
    message: message,
    className: className,
    pid: pid,
    date: dateTime
  };
  if (object) state.meta = object;
  currentLogger(msg, state);
}

/**
 * Log a message at the warn level
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.warn = function(message, object) {
  if (this.isWarn() && classPassesFilter(this.className)) {
    emitLog('warn', 'WARN', this.className, message, object);
  }
};

/**
 * Log a message at the info level
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.info = function(message, object) {
  if (this.isInfo() && classPassesFilter(this.className)) {
    emitLog('info', 'INFO', this.className, message, object);
  }
};

/**
 * Log a message at the error level
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.error = function(message, object) {
  if (this.isError() && classPassesFilter(this.className)) {
    emitLog('error', 'ERROR', this.className, message, object);
  }
};

/**
 * Is the logger set at info level
 * @method
 * @return {boolean}
 */
Logger.prototype.isInfo = function() {
  return level === 'info' || level === 'debug';
};

/**
 * Is the logger set at error level
 * @method
 * @return {boolean}
 */
Logger.prototype.isError = function() {
  return level === 'error' || level === 'info' || level === 'debug';
};

/**
 * Is the logger set at warn level (or any level that includes warn)
 * @method
 * @return {boolean}
 */
Logger.prototype.isWarn = function() {
  return level === 'error' || level === 'warn' || level === 'info' || level === 'debug';
};

/**
 * Is the logger set at debug level
 * @method
 * @return {boolean}
 */
Logger.prototype.isDebug = function() {
  return level === 'debug';
};
|
||||
|
||||
/**
 * Resets the logger to its defaults: level `error` and no class filters.
 * @method
 * @return {null}
 */
Logger.reset = function() {
  filteredClasses = {};
  level = 'error';
};
|
||||
|
||||
/**
 * Get the logger function currently in use.
 * @method
 * @return {Logger~loggerCallback} The active logger callback
 */
Logger.currentLogger = function() {
  return currentLogger;
};
|
||||
|
||||
/**
 * Install a custom logger function.
 * @method
 * @param {Logger~loggerCallback} logger Logger function.
 * @throws {MongoError} When `logger` is not a function.
 * @return {null}
 */
Logger.setCurrentLogger = function(logger) {
  if (typeof logger === 'function') {
    currentLogger = logger;
    return;
  }
  throw new MongoError('current logger must be a function');
};
|
||||
|
||||
/**
 * Set what classes to log.
 * @method
 * @param {string} type The type of filter (currently only 'class' is supported)
 * @param {string[]} values The class names to allow through the filter
 * @return {null}
 */
Logger.filter = function(type, values) {
  // Only the 'class' filter type is supported, and it requires an array.
  if (type !== 'class' || !Array.isArray(values)) return;

  var next = {};
  for (var i = 0; i < values.length; i++) {
    next[values[i]] = true;
  }
  filteredClasses = next;
};
|
||||
|
||||
/**
 * Set the current log level
 * @method
 * @param {string} _level Set current log level (debug, warn, info, error)
 * @throws {Error} When `_level` is not one of the supported levels
 * @return {null}
 */
Logger.setLevel = function(_level) {
  // Reject anything that is not one of the four supported levels.
  if (_level !== 'info' && _level !== 'error' && _level !== 'debug' && _level !== 'warn') {
    throw new Error(f('%s is an illegal logging level', _level));
  }

  level = _level;
};
|
||||
|
||||
module.exports = Logger;
|
||||
222
node_modules/mongodb/lib/core/connection/msg.js
generated
vendored
Normal file
222
node_modules/mongodb/lib/core/connection/msg.js
generated
vendored
Normal file
@@ -0,0 +1,222 @@
|
||||
'use strict';
|
||||
|
||||
// Implementation of OP_MSG spec:
|
||||
// https://github.com/mongodb/specifications/blob/master/source/message/OP_MSG.rst
|
||||
//
|
||||
// struct Section {
|
||||
// uint8 payloadType;
|
||||
// union payload {
|
||||
// document document; // payloadType == 0
|
||||
// struct sequence { // payloadType == 1
|
||||
// int32 size;
|
||||
// cstring identifier;
|
||||
// document* documents;
|
||||
// };
|
||||
// };
|
||||
// };
|
||||
|
||||
// struct OP_MSG {
|
||||
// struct MsgHeader {
|
||||
// int32 messageLength;
|
||||
// int32 requestID;
|
||||
// int32 responseTo;
|
||||
// int32 opCode = 2013;
|
||||
// };
|
||||
// uint32 flagBits;
|
||||
// Section+ sections;
|
||||
// [uint32 checksum;]
|
||||
// };
|
||||
|
||||
const Buffer = require('safe-buffer').Buffer;
|
||||
const opcodes = require('../wireprotocol/shared').opcodes;
|
||||
const databaseNamespace = require('../wireprotocol/shared').databaseNamespace;
|
||||
const ReadPreference = require('../topologies/read_preference');
|
||||
|
||||
// Incrementing request id
|
||||
let _requestId = 0;
|
||||
|
||||
// Msg Flags
|
||||
const OPTS_CHECKSUM_PRESENT = 1;
|
||||
const OPTS_MORE_TO_COME = 2;
|
||||
const OPTS_EXHAUST_ALLOWED = 1 << 16;
|
||||
|
||||
/**
 * An OP_MSG wire-protocol request message.
 *
 * Wraps a command document destined for namespace `ns` and knows how to
 * serialize itself (header + flags + a single type-0 document section)
 * via `toBin()`.
 */
class Msg {
  /**
   * @param {object} bson BSON serializer instance
   * @param {string} ns Fully qualified namespace (ex: db1.collection1)
   * @param {object} command The command document to send
   * @param {object} [options] Optional settings (requestId, readPreference,
   *   serializeFunctions, ignoreUndefined, checkKeys, maxBsonSize,
   *   moreToCome, exhaustAllowed)
   * @throws {Error} When `command` is null or undefined
   */
  constructor(bson, ns, command, options) {
    // Basic options needed to be passed in
    if (command == null) throw new Error('query must be specified for query');

    // Default options BEFORE any property access below. Previously
    // `options.readPreference` was dereferenced prior to the `options || {}`
    // guard, so a null/undefined options argument threw a TypeError.
    options = options || {};

    // Basic options
    this.bson = bson;
    this.ns = ns;
    this.command = command;
    this.command.$db = databaseNamespace(ns);

    // Attach a $readPreference for anything other than primary reads.
    if (options.readPreference && options.readPreference.mode !== ReadPreference.PRIMARY) {
      this.command.$readPreference = options.readPreference.toJSON();
    }

    this.options = options;

    // Use the caller-supplied request id, otherwise allocate a fresh one.
    this.requestId = options.requestId ? options.requestId : Msg.getRequestId();

    // Serialization options
    this.serializeFunctions =
      typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
    this.ignoreUndefined =
      typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;
    this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false;
    this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;

    // OP_MSG flag bits
    this.checksumPresent = false;
    this.moreToCome = options.moreToCome || false;
    this.exhaustAllowed =
      typeof options.exhaustAllowed === 'boolean' ? options.exhaustAllowed : false;
  }

  /**
   * Serialize this message into wire format.
   * @return {Buffer[]} Array of buffers: [header+flags, payloadType, document]
   */
  toBin() {
    const buffers = [];
    let flags = 0;

    if (this.checksumPresent) {
      flags |= OPTS_CHECKSUM_PRESENT;
    }

    if (this.moreToCome) {
      flags |= OPTS_MORE_TO_COME;
    }

    if (this.exhaustAllowed) {
      flags |= OPTS_EXHAUST_ALLOWED;
    }

    // MsgHeader (4 int32s) plus the uint32 flagBits.
    const header = Buffer.alloc(
      4 * 4 + // Header
        4 // Flags
    );

    buffers.push(header);

    let totalLength = header.length;
    const command = this.command;
    totalLength += this.makeDocumentSegment(buffers, command);

    // Backfill the header now that the total length is known.
    header.writeInt32LE(totalLength, 0); // messageLength
    header.writeInt32LE(this.requestId, 4); // requestID
    header.writeInt32LE(0, 8); // responseTo
    header.writeInt32LE(opcodes.OP_MSG, 12); // opCode
    header.writeUInt32LE(flags, 16); // flags
    return buffers;
  }

  /**
   * Append a payload-type-0 (single document) section to `buffers`.
   * @param {Buffer[]} buffers Output list of buffers (mutated)
   * @param {object} document The document to serialize
   * @return {number} Number of bytes appended
   */
  makeDocumentSegment(buffers, document) {
    const payloadTypeBuffer = Buffer.alloc(1);
    payloadTypeBuffer[0] = 0;

    const documentBuffer = this.serializeBson(document);
    buffers.push(payloadTypeBuffer);
    buffers.push(documentBuffer);

    return payloadTypeBuffer.length + documentBuffer.length;
  }

  /**
   * Serialize a document with this message's BSON options.
   * @param {object} document The document to serialize
   * @return {Buffer} BSON bytes
   */
  serializeBson(document) {
    return this.bson.serialize(document, {
      checkKeys: this.checkKeys,
      serializeFunctions: this.serializeFunctions,
      ignoreUndefined: this.ignoreUndefined
    });
  }
}
|
||||
|
||||
// Produce the next request id, wrapping with a mask at 2^31 - 1 so the
// value always fits in a positive int32.
Msg.getRequestId = function() {
  const next = (_requestId + 1) & 0x7fffffff;
  _requestId = next;
  return next;
};
|
||||
|
||||
/**
 * An OP_MSG wire-protocol response message.
 *
 * Wraps the raw message bytes plus the parsed header, and lazily
 * deserializes the BSON document sections on `parse()`.
 */
class BinMsg {
  /**
   * @param {object} bson BSON serializer/deserializer instance
   * @param {Buffer} message The full raw message
   * @param {object} msgHeader Parsed header (length, requestId, responseTo, opCode, fromCompressed)
   * @param {Buffer} msgBody Message body (flags + sections), starting at the flagBits int32
   * @param {object} [opts] Default BSON promotion options
   */
  constructor(bson, message, msgHeader, msgBody, opts) {
    opts = opts || { promoteLongs: true, promoteValues: true, promoteBuffers: false };
    // Documents are not deserialized until parse() is called.
    this.parsed = false;
    this.raw = message;
    this.data = msgBody;
    this.bson = bson;
    this.opts = opts;

    // Read the message header
    this.length = msgHeader.length;
    this.requestId = msgHeader.requestId;
    this.responseTo = msgHeader.responseTo;
    this.opCode = msgHeader.opCode;
    this.fromCompressed = msgHeader.fromCompressed;

    // Read response flags (first int32 of the body)
    this.responseFlags = msgBody.readInt32LE(0);
    this.checksumPresent = (this.responseFlags & OPTS_CHECKSUM_PRESENT) !== 0;
    this.moreToCome = (this.responseFlags & OPTS_MORE_TO_COME) !== 0;
    this.exhaustAllowed = (this.responseFlags & OPTS_EXHAUST_ALLOWED) !== 0;
    // Per-message promotion defaults; parse() may override per call.
    this.promoteLongs = typeof opts.promoteLongs === 'boolean' ? opts.promoteLongs : true;
    this.promoteValues = typeof opts.promoteValues === 'boolean' ? opts.promoteValues : true;
    this.promoteBuffers = typeof opts.promoteBuffers === 'boolean' ? opts.promoteBuffers : false;

    this.documents = [];
  }

  /**
   * Whether parse() has already run on this message.
   * @return {boolean}
   */
  isParsed() {
    return this.parsed;
  }

  /**
   * Deserialize the document sections into `this.documents`.
   *
   * @param {object} [options] Per-call overrides
   * @param {boolean} [options.raw] Keep documents as raw BSON Buffers instead of deserializing
   * @param {string} [options.documentsReturnedIn] Field name whose contents should stay raw when re-deserializing
   * @param {boolean} [options.promoteLongs]
   * @param {boolean} [options.promoteValues]
   * @param {boolean} [options.promoteBuffers]
   */
  parse(options) {
    // Don't parse again if not needed
    if (this.parsed) return;
    options = options || {};

    // Skip the 4-byte flagBits field; sections start right after it.
    this.index = 4;
    // Allow the return of raw documents instead of parsing
    const raw = options.raw || false;
    const documentsReturnedIn = options.documentsReturnedIn || null;
    // Per-call options win over the constructor-supplied defaults.
    const promoteLongs =
      typeof options.promoteLongs === 'boolean' ? options.promoteLongs : this.opts.promoteLongs;
    const promoteValues =
      typeof options.promoteValues === 'boolean' ? options.promoteValues : this.opts.promoteValues;
    const promoteBuffers =
      typeof options.promoteBuffers === 'boolean'
        ? options.promoteBuffers
        : this.opts.promoteBuffers;

    // Set up the options
    const _options = {
      promoteLongs: promoteLongs,
      promoteValues: promoteValues,
      promoteBuffers: promoteBuffers
    };

    // Walk the sections: each starts with a 1-byte payload type.
    while (this.index < this.data.length) {
      const payloadType = this.data.readUInt8(this.index++);
      if (payloadType === 1) {
        // Payload type 1 (document sequence) is not handled here.
        console.error('TYPE 1');
      } else if (payloadType === 0) {
        // Payload type 0: a single BSON document. Its first int32 is its size.
        const bsonSize = this.data.readUInt32LE(this.index);
        const bin = this.data.slice(this.index, this.index + bsonSize);
        this.documents.push(raw ? bin : this.bson.deserialize(bin, _options));

        this.index += bsonSize;
      }
    }

    // Special case: a single raw document with a designated raw field —
    // deserialize it but keep that one field's contents as raw BSON.
    if (this.documents.length === 1 && documentsReturnedIn != null && raw) {
      const fieldsAsRaw = {};
      fieldsAsRaw[documentsReturnedIn] = true;
      _options.fieldsAsRaw = fieldsAsRaw;

      const doc = this.bson.deserialize(this.documents[0], _options);
      this.documents = [doc];
    }

    this.parsed = true;
  }
}
|
||||
|
||||
module.exports = { Msg, BinMsg };
|
||||
1281
node_modules/mongodb/lib/core/connection/pool.js
generated
vendored
Normal file
1281
node_modules/mongodb/lib/core/connection/pool.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
57
node_modules/mongodb/lib/core/connection/utils.js
generated
vendored
Normal file
57
node_modules/mongodb/lib/core/connection/utils.js
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
'use strict';
|
||||
|
||||
const require_optional = require('require_optional');
|
||||
|
||||
/**
 * Pick a whitelisted subset of option fields, e.g. for debug output.
 * @param {string[]} debugFields Names of the fields to copy
 * @param {object} options Source options object
 * @return {object} New object containing only the requested fields
 */
function debugOptions(debugFields, options) {
  const picked = {};
  for (const field of debugFields) {
    picked[field] = options[field];
  }
  return picked;
}
|
||||
|
||||
/**
 * Load the best available BSON implementation: the optional native
 * `bson-ext` addon when installed, otherwise the pure-JS `bson` package.
 * The returned library is tagged with a `native` boolean.
 * @return {object} BSON library
 */
function retrieveBSON() {
  // Pure-JS implementation — always present as a hard dependency.
  var jsBSON = require('bson');
  jsBSON.native = false;

  try {
    var nativeBSON = require_optional('bson-ext');
    if (nativeBSON) {
      nativeBSON.native = true;
      return nativeBSON;
    }
  } catch (err) {} // eslint-disable-line

  return jsBSON;
}
|
||||
|
||||
// Stub used in place of the Snappy API when the optional `snappy` package
// is not installed: any attempted use throws a descriptive error.
function noSnappyWarning() {
  const message =
    'Attempted to use Snappy compression, but Snappy is not installed. Install or disable Snappy compression and try again.';
  throw new Error(message);
}
|
||||
|
||||
/**
 * Load the optional `snappy` compression package. When it is not
 * installed, return an object whose methods all throw a clear error,
 * so any attempted use surfaces immediately.
 * @return {object} The snappy module or a throwing stub
 */
function retrieveSnappy() {
  var snappy = null;
  try {
    snappy = require_optional('snappy');
  } catch (error) {} // eslint-disable-line

  if (snappy) return snappy;

  // Not installed — substitute throwing stubs for the full API surface.
  return {
    compress: noSnappyWarning,
    uncompress: noSnappyWarning,
    compressSync: noSnappyWarning,
    uncompressSync: noSnappyWarning
  };
}
|
||||
|
||||
module.exports = {
|
||||
debugOptions,
|
||||
retrieveBSON,
|
||||
retrieveSnappy
|
||||
};
|
||||
871
node_modules/mongodb/lib/core/cursor.js
generated
vendored
Normal file
871
node_modules/mongodb/lib/core/cursor.js
generated
vendored
Normal file
@@ -0,0 +1,871 @@
|
||||
'use strict';
|
||||
|
||||
const Logger = require('./connection/logger');
|
||||
const retrieveBSON = require('./connection/utils').retrieveBSON;
|
||||
const MongoError = require('./error').MongoError;
|
||||
const MongoNetworkError = require('./error').MongoNetworkError;
|
||||
const collationNotSupported = require('./utils').collationNotSupported;
|
||||
const ReadPreference = require('./topologies/read_preference');
|
||||
const isUnifiedTopology = require('./utils').isUnifiedTopology;
|
||||
const executeOperation = require('../operations/execute_operation');
|
||||
const Readable = require('stream').Readable;
|
||||
const SUPPORTS = require('../utils').SUPPORTS;
|
||||
const MongoDBNamespace = require('../utils').MongoDBNamespace;
|
||||
const OperationBase = require('../operations/operation').OperationBase;
|
||||
|
||||
const BSON = retrieveBSON();
|
||||
const Long = BSON.Long;
|
||||
|
||||
// Possible states for a cursor
|
||||
const CursorState = {
|
||||
INIT: 0,
|
||||
OPEN: 1,
|
||||
CLOSED: 2,
|
||||
GET_MORE: 3
|
||||
};
|
||||
|
||||
//
|
||||
// Handle callback (including any exceptions thrown)
|
||||
function handleCallback(callback, err, result) {
|
||||
try {
|
||||
callback(err, result);
|
||||
} catch (err) {
|
||||
process.nextTick(function() {
|
||||
throw err;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This is a cursor results callback
|
||||
*
|
||||
* @callback resultCallback
|
||||
* @param {error} error An error object. Set to null if no error present
|
||||
* @param {object} document
|
||||
*/
|
||||
|
||||
/**
|
||||
* @fileOverview The **Cursor** class is an internal class that embodies a cursor on MongoDB
|
||||
* allowing for iteration over the results returned from the underlying query.
|
||||
*
|
||||
* **CURSORS Cannot directly be instantiated**
|
||||
*/
|
||||
|
||||
/**
|
||||
* The core cursor class. All cursors in the driver build off of this one.
|
||||
*
|
||||
* @property {number} cursorBatchSize The current cursorBatchSize for the cursor
|
||||
* @property {number} cursorLimit The current cursorLimit for the cursor
|
||||
* @property {number} cursorSkip The current cursorSkip for the cursor
|
||||
*/
|
||||
class CoreCursor extends Readable {
|
||||
/**
|
||||
* Create a new core `Cursor` instance.
|
||||
* **NOTE** Not to be instantiated directly
|
||||
*
|
||||
* @param {object} topology The server topology instance.
|
||||
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
|
||||
* @param {{object}|Long} cmd The selector (can be a command or a cursorId)
|
||||
* @param {object} [options=null] Optional settings.
|
||||
* @param {object} [options.batchSize=1000] The number of documents to return per batch. See {@link https://docs.mongodb.com/manual/reference/command/find/| find command documentation} and {@link https://docs.mongodb.com/manual/reference/command/aggregate|aggregation documentation}.
|
||||
* @param {array} [options.documents=[]] Initial documents list for cursor
|
||||
* @param {object} [options.transforms=null] Transform methods for the cursor results
|
||||
* @param {function} [options.transforms.query] Transform the value returned from the initial query
|
||||
* @param {function} [options.transforms.doc] Transform each document returned from Cursor.prototype._next
|
||||
*/
|
||||
constructor(topology, ns, cmd, options) {
|
||||
super({ objectMode: true });
|
||||
options = options || {};
|
||||
|
||||
if (ns instanceof OperationBase) {
|
||||
this.operation = ns;
|
||||
ns = this.operation.ns.toString();
|
||||
options = this.operation.options;
|
||||
cmd = this.operation.cmd ? this.operation.cmd : {};
|
||||
}
|
||||
|
||||
// Cursor pool
|
||||
this.pool = null;
|
||||
// Cursor server
|
||||
this.server = null;
|
||||
|
||||
// Do we have a not connected handler
|
||||
this.disconnectHandler = options.disconnectHandler;
|
||||
|
||||
// Set local values
|
||||
this.bson = topology.s.bson;
|
||||
this.ns = ns;
|
||||
this.namespace = MongoDBNamespace.fromString(ns);
|
||||
this.cmd = cmd;
|
||||
this.options = options;
|
||||
this.topology = topology;
|
||||
|
||||
// All internal state
|
||||
this.cursorState = {
|
||||
cursorId: null,
|
||||
cmd,
|
||||
documents: options.documents || [],
|
||||
cursorIndex: 0,
|
||||
dead: false,
|
||||
killed: false,
|
||||
init: false,
|
||||
notified: false,
|
||||
limit: options.limit || cmd.limit || 0,
|
||||
skip: options.skip || cmd.skip || 0,
|
||||
batchSize: options.batchSize || cmd.batchSize || 1000,
|
||||
currentLimit: 0,
|
||||
// Result field name if not a cursor (contains the array of results)
|
||||
transforms: options.transforms,
|
||||
raw: options.raw || (cmd && cmd.raw)
|
||||
};
|
||||
|
||||
if (typeof options.session === 'object') {
|
||||
this.cursorState.session = options.session;
|
||||
}
|
||||
|
||||
// Add promoteLong to cursor state
|
||||
const topologyOptions = topology.s.options;
|
||||
if (typeof topologyOptions.promoteLongs === 'boolean') {
|
||||
this.cursorState.promoteLongs = topologyOptions.promoteLongs;
|
||||
} else if (typeof options.promoteLongs === 'boolean') {
|
||||
this.cursorState.promoteLongs = options.promoteLongs;
|
||||
}
|
||||
|
||||
// Add promoteValues to cursor state
|
||||
if (typeof topologyOptions.promoteValues === 'boolean') {
|
||||
this.cursorState.promoteValues = topologyOptions.promoteValues;
|
||||
} else if (typeof options.promoteValues === 'boolean') {
|
||||
this.cursorState.promoteValues = options.promoteValues;
|
||||
}
|
||||
|
||||
// Add promoteBuffers to cursor state
|
||||
if (typeof topologyOptions.promoteBuffers === 'boolean') {
|
||||
this.cursorState.promoteBuffers = topologyOptions.promoteBuffers;
|
||||
} else if (typeof options.promoteBuffers === 'boolean') {
|
||||
this.cursorState.promoteBuffers = options.promoteBuffers;
|
||||
}
|
||||
|
||||
if (topologyOptions.reconnect) {
|
||||
this.cursorState.reconnect = topologyOptions.reconnect;
|
||||
}
|
||||
|
||||
// Logger
|
||||
this.logger = Logger('Cursor', topologyOptions);
|
||||
|
||||
//
|
||||
// Did we pass in a cursor id
|
||||
if (typeof cmd === 'number') {
|
||||
this.cursorState.cursorId = Long.fromNumber(cmd);
|
||||
this.cursorState.lastCursorId = this.cursorState.cursorId;
|
||||
} else if (cmd instanceof Long) {
|
||||
this.cursorState.cursorId = cmd;
|
||||
this.cursorState.lastCursorId = cmd;
|
||||
}
|
||||
|
||||
// TODO: remove as part of NODE-2104
|
||||
if (this.operation) {
|
||||
this.operation.cursorState = this.cursorState;
|
||||
}
|
||||
}
|
||||
|
||||
setCursorBatchSize(value) {
|
||||
this.cursorState.batchSize = value;
|
||||
}
|
||||
|
||||
cursorBatchSize() {
|
||||
return this.cursorState.batchSize;
|
||||
}
|
||||
|
||||
setCursorLimit(value) {
|
||||
this.cursorState.limit = value;
|
||||
}
|
||||
|
||||
cursorLimit() {
|
||||
return this.cursorState.limit;
|
||||
}
|
||||
|
||||
setCursorSkip(value) {
|
||||
this.cursorState.skip = value;
|
||||
}
|
||||
|
||||
cursorSkip() {
|
||||
return this.cursorState.skip;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the next document from the cursor
|
||||
* @method
|
||||
* @param {resultCallback} callback A callback function
|
||||
*/
|
||||
_next(callback) {
|
||||
nextFunction(this, callback);
|
||||
}
|
||||
|
||||
/**
|
||||
* Clone the cursor
|
||||
* @method
|
||||
* @return {Cursor}
|
||||
*/
|
||||
clone() {
|
||||
return this.topology.cursor(this.ns, this.cmd, this.options);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the cursor is dead
|
||||
* @method
|
||||
* @return {boolean} A boolean signifying if the cursor is dead or not
|
||||
*/
|
||||
isDead() {
|
||||
return this.cursorState.dead === true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the cursor was killed by the application
|
||||
* @method
|
||||
* @return {boolean} A boolean signifying if the cursor was killed by the application
|
||||
*/
|
||||
isKilled() {
|
||||
return this.cursorState.killed === true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if the cursor notified it's caller about it's death
|
||||
* @method
|
||||
* @return {boolean} A boolean signifying if the cursor notified the callback
|
||||
*/
|
||||
isNotified() {
|
||||
return this.cursorState.notified === true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns current buffered documents length
|
||||
* @method
|
||||
* @return {number} The number of items in the buffered documents
|
||||
*/
|
||||
bufferedCount() {
|
||||
return this.cursorState.documents.length - this.cursorState.cursorIndex;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns current buffered documents
|
||||
* @method
|
||||
* @return {Array} An array of buffered documents
|
||||
*/
|
||||
readBufferedDocuments(number) {
|
||||
const unreadDocumentsLength = this.cursorState.documents.length - this.cursorState.cursorIndex;
|
||||
const length = number < unreadDocumentsLength ? number : unreadDocumentsLength;
|
||||
let elements = this.cursorState.documents.slice(
|
||||
this.cursorState.cursorIndex,
|
||||
this.cursorState.cursorIndex + length
|
||||
);
|
||||
|
||||
// Transform the doc with passed in transformation method if provided
|
||||
if (this.cursorState.transforms && typeof this.cursorState.transforms.doc === 'function') {
|
||||
// Transform all the elements
|
||||
for (let i = 0; i < elements.length; i++) {
|
||||
elements[i] = this.cursorState.transforms.doc(elements[i]);
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure we do not return any more documents than the limit imposed
|
||||
// Just return the number of elements up to the limit
|
||||
if (
|
||||
this.cursorState.limit > 0 &&
|
||||
this.cursorState.currentLimit + elements.length > this.cursorState.limit
|
||||
) {
|
||||
elements = elements.slice(0, this.cursorState.limit - this.cursorState.currentLimit);
|
||||
this.kill();
|
||||
}
|
||||
|
||||
// Adjust current limit
|
||||
this.cursorState.currentLimit = this.cursorState.currentLimit + elements.length;
|
||||
this.cursorState.cursorIndex = this.cursorState.cursorIndex + elements.length;
|
||||
|
||||
// Return elements
|
||||
return elements;
|
||||
}
|
||||
|
||||
/**
 * Resets local state for this cursor instance, and issues a `killCursors` command to the server
 *
 * @param {resultCallback} callback A callback function
 */
kill(callback) {
  // Set cursor to dead
  this.cursorState.dead = true;
  this.cursorState.killed = true;
  // Remove documents
  this.cursorState.documents = [];

  // If no cursor id just return; a null/zero id or an uninitialized cursor
  // means there is nothing to clean up server-side.
  if (
    this.cursorState.cursorId == null ||
    this.cursorState.cursorId.isZero() ||
    this.cursorState.init === false
  ) {
    if (callback) callback(null, null);
    return;
  }

  this.server.killCursors(this.ns, this.cursorState, callback);
}
|
||||
|
||||
/**
 * Resets the cursor so it can be iterated again from the beginning.
 * Kills any live server-side cursor first, then clears all local state.
 */
rewind() {
  if (this.cursorState.init) {
    // Only issue a kill when the cursor is still alive on the server.
    if (!this.cursorState.dead) {
      this.kill();
    }

    this.cursorState.currentLimit = 0;
    this.cursorState.init = false;
    this.cursorState.dead = false;
    this.cursorState.killed = false;
    this.cursorState.notified = false;
    this.cursorState.documents = [];
    this.cursorState.cursorId = null;
    this.cursorState.cursorIndex = 0;
  }
}
|
||||
|
||||
// Internal methods

/**
 * Readable-stream `_read` implementation: pulls the next document via `_next`
 * and pushes it downstream, applying the optional stream transform. On error
 * it emits `error` (only when listeners exist), closes the cursor, and emits
 * `end`/`finish`.
 */
_read() {
  if ((this.s && this.s.state === CursorState.CLOSED) || this.isDead()) {
    return this.push(null);
  }

  // Get the next item
  this._next((err, result) => {
    if (err) {
      // Only emit `error` when someone is listening, to avoid an unhandled
      // 'error' event crash.
      if (this.listeners('error') && this.listeners('error').length > 0) {
        this.emit('error', err);
      }
      if (!this.isDead()) this.close();

      // Emit end event
      this.emit('end');
      return this.emit('finish');
    }

    // If we provided a transformation method
    if (
      this.cursorState.streamOptions &&
      typeof this.cursorState.streamOptions.transform === 'function' &&
      result != null
    ) {
      return this.push(this.cursorState.streamOptions.transform(result));
    }

    // Return the result
    this.push(result);

    if (result === null && this.isDead()) {
      // Defer cleanup until the stream has fully flushed.
      this.once('end', () => {
        this.close();
        this.emit('finish');
      });
    }
  });
}
|
||||
|
||||
/**
 * Ends the session associated with this cursor, when the cursor owns it.
 *
 * @param {object} [options] `force: true` ends the session even when this cursor is not its owner
 * @param {function} [callback] invoked once the session has ended (or immediately when there is none)
 * @returns {boolean} true when a session was actually ended
 */
_endSession(options, callback) {
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }
  options = options || {};

  const session = this.cursorState.session;

  if (session && (options.force || session.owner === this)) {
    // Detach the session before ending it so no later operation reuses it.
    this.cursorState.session = undefined;

    if (this.operation) {
      this.operation.clearSession();
    }

    session.endSession(callback);
    return true;
  }

  if (callback) {
    callback();
  }

  return false;
}
|
||||
|
||||
/**
 * Fetches the next batch from the server via `getMore`, clamping the batch
 * size so the cursor never exceeds its configured limit. Ends the session
 * when the server reports the cursor is exhausted (zero cursor id) or errors.
 *
 * @param {function} callback receives (err, result, connection)
 */
_getMore(callback) {
  if (this.logger.isDebug()) {
    this.logger.debug(`schedule getMore call for query [${JSON.stringify(this.query)}]`);
  }

  // Set the current batchSize
  let batchSize = this.cursorState.batchSize;
  if (
    this.cursorState.limit > 0 &&
    this.cursorState.currentLimit + batchSize > this.cursorState.limit
  ) {
    // Shrink the final batch so exactly `limit` documents are returned.
    batchSize = this.cursorState.limit - this.cursorState.currentLimit;
  }

  const cursorState = this.cursorState;
  this.server.getMore(this.ns, cursorState, batchSize, this.options, (err, result, conn) => {
    // NOTE: `getMore` modifies `cursorState`, would be very ideal not to do so in the future
    if (err || (cursorState.cursorId && cursorState.cursorId.isZero())) {
      this._endSession();
    }

    callback(err, result, conn);
  });
}
|
||||
|
||||
/**
 * Issues the initial query/command for this cursor and primes `cursorState`
 * (cursorId, first batch of documents, operationTime) from the server reply.
 * Handles three paths: operation-based execution via `executeOperation`,
 * legacy `find` queries, and generic commands.
 *
 * @param {function} callback invoked with (err, result) once the cursor is initialized
 */
_initializeCursor(callback) {
  const cursor = this;

  // NOTE: this goes away once cursors use `executeOperation`
  if (isUnifiedTopology(cursor.topology) && cursor.topology.shouldCheckForSessionSupport()) {
    // Force a server selection so session support is known, then retry.
    cursor.topology.selectServer(ReadPreference.primaryPreferred, err => {
      if (err) {
        callback(err);
        return;
      }

      this._initializeCursor(callback);
    });

    return;
  }

  // Shared completion handler: ends the session on error/exhaustion and
  // short-circuits to "notified" for an empty, non-tailable result.
  function done(err, result) {
    const cursorState = cursor.cursorState;
    if (err || (cursorState.cursorId && cursorState.cursorId.isZero())) {
      cursor._endSession();
    }

    if (
      cursorState.documents.length === 0 &&
      cursorState.cursorId &&
      cursorState.cursorId.isZero() &&
      !cursor.cmd.tailable &&
      !cursor.cmd.awaitData
    ) {
      return setCursorNotified(cursor, callback);
    }

    callback(err, result);
  }

  const queryCallback = (err, r) => {
    if (err) {
      return done(err);
    }

    const result = r.message;

    if (Array.isArray(result.documents) && result.documents.length === 1) {
      const document = result.documents[0];

      if (result.queryFailure) {
        return done(new MongoError(document), null);
      }

      // Check if we have a command cursor
      if (!cursor.cmd.find || (cursor.cmd.find && cursor.cmd.virtual === false)) {
        // We have an error document, return the error
        if (document.$err || document.errmsg) {
          return done(new MongoError(document), null);
        }

        // We have a cursor document
        if (document.cursor != null && typeof document.cursor !== 'string') {
          const id = document.cursor.id;
          // If we have a namespace change set the new namespace for getmores
          if (document.cursor.ns) {
            cursor.ns = document.cursor.ns;
          }
          // Promote id to long if needed
          cursor.cursorState.cursorId = typeof id === 'number' ? Long.fromNumber(id) : id;
          cursor.cursorState.lastCursorId = cursor.cursorState.cursorId;
          cursor.cursorState.operationTime = document.operationTime;

          // If we have a firstBatch set it
          if (Array.isArray(document.cursor.firstBatch)) {
            cursor.cursorState.documents = document.cursor.firstBatch; //.reverse();
          }

          // Return after processing command cursor
          return done(null, result);
        }
      }
    }

    // Otherwise fall back to regular find path
    const cursorId = result.cursorId || 0;
    cursor.cursorState.cursorId = cursorId instanceof Long ? cursorId : Long.fromNumber(cursorId);
    cursor.cursorState.documents = result.documents;
    cursor.cursorState.lastCursorId = result.cursorId;

    // Transform the results with passed in transformation method if provided
    if (
      cursor.cursorState.transforms &&
      typeof cursor.cursorState.transforms.query === 'function'
    ) {
      cursor.cursorState.documents = cursor.cursorState.transforms.query(result);
    }

    done(null, result);
  };

  if (cursor.operation) {
    if (cursor.logger.isDebug()) {
      cursor.logger.debug(
        `issue initial query [${JSON.stringify(cursor.cmd)}] with flags [${JSON.stringify(
          cursor.query
        )}]`
      );
    }

    executeOperation(cursor.topology, cursor.operation, (err, result) => {
      if (err) {
        done(err);
        return;
      }

      cursor.server = cursor.operation.server;
      cursor.cursorState.init = true;

      // NOTE: this is a special internal method for cloning a cursor, consider removing
      if (cursor.cursorState.cursorId != null) {
        return done();
      }

      queryCallback(err, result);
    });

    return;
  }

  // Very explicitly choose what is passed to selectServer
  const serverSelectOptions = {};
  if (cursor.cursorState.session) {
    serverSelectOptions.session = cursor.cursorState.session;
  }

  if (cursor.operation) {
    serverSelectOptions.readPreference = cursor.operation.readPreference;
  } else if (cursor.options.readPreference) {
    serverSelectOptions.readPreference = cursor.options.readPreference;
  }

  return cursor.topology.selectServer(serverSelectOptions, (err, server) => {
    if (err) {
      // When a disconnect handler exists, queue the `next` call for replay
      // after reconnection instead of failing outright.
      const disconnectHandler = cursor.disconnectHandler;
      if (disconnectHandler != null) {
        return disconnectHandler.addObjectAndMethod(
          'cursor',
          cursor,
          'next',
          [callback],
          callback
        );
      }

      return callback(err);
    }

    cursor.server = server;
    cursor.cursorState.init = true;
    if (collationNotSupported(cursor.server, cursor.cmd)) {
      return callback(new MongoError(`server ${cursor.server.name} does not support collation`));
    }

    // NOTE: this is a special internal method for cloning a cursor, consider removing
    if (cursor.cursorState.cursorId != null) {
      return done();
    }

    if (cursor.logger.isDebug()) {
      cursor.logger.debug(
        `issue initial query [${JSON.stringify(cursor.cmd)}] with flags [${JSON.stringify(
          cursor.query
        )}]`
      );
    }

    if (cursor.cmd.find != null) {
      server.query(cursor.ns, cursor.cmd, cursor.cursorState, cursor.options, queryCallback);
      return;
    }

    const commandOptions = Object.assign({ session: cursor.cursorState.session }, cursor.options);
    server.command(cursor.ns, cursor.cmd, commandOptions, queryCallback);
  });
}
|
||||
}
|
||||
|
||||
// Attach `Symbol.asyncIterator` support (enables `for await ... of cursor`)
// only on runtimes that support async iteration.
if (SUPPORTS.ASYNC_ITERATOR) {
  CoreCursor.prototype[Symbol.asyncIterator] = require('../async/async_iterator').asyncIterator;
}
|
||||
|
||||
/**
 * Validates whether the cursor's connection pool has been destroyed.
 *
 * When the pool is gone the cursor is marked killed, moved to the notified
 * state, and the callback receives a MongoNetworkError describing the host.
 *
 * @param {CoreCursor} self the cursor being validated
 * @param {function} callback receives the network error when the pool is dead
 * @returns {boolean} true when the pool was destroyed and the callback was scheduled
 */
function isConnectionDead(self, callback) {
  const pool = self.pool;
  if (!pool || !pool.isDestroyed()) {
    return false;
  }

  self.cursorState.killed = true;
  const err = new MongoNetworkError(
    `connection to host ${self.pool.host}:${self.pool.port} was destroyed`
  );

  _setCursorNotifiedImpl(self, () => callback(err));
  return true;
}
|
||||
|
||||
/**
 * Validates whether the cursor is dead without having been explicitly killed
 * by the user (i.e. the server exhausted it on its own).
 *
 * @param {CoreCursor} self the cursor being validated
 * @param {function} callback notified with a null result when the cursor is done
 * @returns {boolean} true when the dead-but-not-killed transition was handled
 */
function isCursorDeadButNotkilled(self, callback) {
  const state = self.cursorState;
  if (!state.dead || state.killed) {
    return false;
  }

  // Mark it killed so later calls take the dead-and-killed path instead.
  state.killed = true;
  setCursorNotified(self, callback);
  return true;
}
|
||||
|
||||
/**
 * Validates whether the cursor is both dead and explicitly killed by the user,
 * in which case any further iteration is an error.
 *
 * @param {CoreCursor} self the cursor being validated
 * @param {function} callback receives a MongoError when the cursor is dead
 * @returns {boolean} true when the error callback was dispatched
 */
function isCursorDeadAndKilled(self, callback) {
  const state = self.cursorState;
  if (!(state.dead && state.killed)) {
    return false;
  }

  handleCallback(callback, new MongoError('cursor is dead'));
  return true;
}
|
||||
|
||||
/**
 * Validates whether the cursor was killed by the user.
 *
 * @param {CoreCursor} self the cursor being validated
 * @param {function} callback notified with a null result when the cursor was killed
 * @returns {boolean} true when the killed state was handled
 */
function isCursorKilled(self, callback) {
  if (!self.cursorState.killed) {
    return false;
  }

  setCursorNotified(self, callback);
  return true;
}
|
||||
|
||||
/**
 * Marks the cursor as dead, then transitions it to the notified state
 * (clearing buffered documents and ending any owned session).
 *
 * @param {CoreCursor} self the cursor to finalize
 * @param {function} callback invoked with (null, null) once notified
 */
function setCursorDeadAndNotified(self, callback) {
  self.cursorState.dead = true;
  setCursorNotified(self, callback);
}
|
||||
|
||||
/**
 * Marks the cursor as notified and completes with a (null, null) result,
 * signalling exhaustion to the caller.
 *
 * @param {CoreCursor} self the cursor to finalize
 * @param {function} callback invoked with (null, null)
 */
function setCursorNotified(self, callback) {
  _setCursorNotifiedImpl(self, () => handleCallback(callback, null, null));
}
|
||||
|
||||
/**
 * Core of the notified-state transition: flags the cursor as notified,
 * discards buffered documents, resets the read index, and ends any session
 * attached to the cursor before invoking the callback.
 *
 * @param {CoreCursor} self the cursor to transition
 * @param {function} callback invoked once the transition (and session end) completes
 */
function _setCursorNotifiedImpl(self, callback) {
  const state = self.cursorState;
  state.notified = true;
  state.documents = [];
  state.cursorIndex = 0;

  // A cursor holding a session must end it before signalling completion.
  if (state.session) {
    self._endSession(callback);
    return;
  }

  return callback();
}
|
||||
|
||||
/**
 * Retrieves the next document from the cursor, driving the full state machine:
 * lazy initialization, buffered reads, `getMore` round trips, tailable-cursor
 * handling, limit enforcement, and dead/killed/notified bookkeeping.
 *
 * @param {CoreCursor} self the cursor to advance
 * @param {function} callback receives (err, doc); doc is null when the cursor is exhausted
 */
function nextFunction(self, callback) {
  // We have notified about it
  if (self.cursorState.notified) {
    return callback(new Error('cursor is exhausted'));
  }

  // Cursor is killed return null
  if (isCursorKilled(self, callback)) return;

  // Cursor is dead but not marked killed, return null
  if (isCursorDeadButNotkilled(self, callback)) return;

  // We have a dead and killed cursor, attempting to call next should error
  if (isCursorDeadAndKilled(self, callback)) return;

  // We have just started the cursor
  if (!self.cursorState.init) {
    // Topology is not connected, save the call in the provided store to be
    // Executed at some point when the handler deems it's reconnected
    if (!self.topology.isConnected(self.options)) {
      // Only need this for single server, because repl sets and mongos
      // will always continue trying to reconnect
      if (self.topology._type === 'server' && !self.topology.s.options.reconnect) {
        // Reconnect is disabled, so we'll never reconnect
        return callback(new MongoError('no connection available'));
      }

      if (self.disconnectHandler != null) {
        if (self.topology.isDestroyed()) {
          // Topology was destroyed, so don't try to wait for it to reconnect
          return callback(new MongoError('Topology was destroyed'));
        }

        self.disconnectHandler.addObjectAndMethod('cursor', self, 'next', [callback], callback);
        return;
      }
    }

    // Initialize, then recurse to serve the first document.
    self._initializeCursor((err, result) => {
      if (err || result === null) {
        callback(err, result);
        return;
      }

      nextFunction(self, callback);
    });

    return;
  }

  if (self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
    // Ensure we kill the cursor on the server
    self.kill(() =>
      // Set cursor in dead and notified state
      setCursorDeadAndNotified(self, callback)
    );
  } else if (
    self.cursorState.cursorIndex === self.cursorState.documents.length &&
    !Long.ZERO.equals(self.cursorState.cursorId)
  ) {
    // Local buffer exhausted but the server cursor is still open: getMore.
    // Ensure an empty cursor state
    self.cursorState.documents = [];
    self.cursorState.cursorIndex = 0;

    // Check if topology is destroyed
    if (self.topology.isDestroyed())
      return callback(
        new MongoNetworkError('connection destroyed, not possible to instantiate cursor')
      );

    // Check if connection is dead and return if not possible to
    // execute a getMore on this connection
    if (isConnectionDead(self, callback)) return;

    // Execute the next get more
    self._getMore(function(err, doc, connection) {
      if (err) {
        return handleCallback(callback, err);
      }

      // Save the returned connection to ensure all getMore's fire over the same connection
      self.connection = connection;

      // Tailable cursor getMore result, notify owner about it
      // No attempt is made here to retry, this is left to the user of the
      // core module to handle to keep core simple
      if (
        self.cursorState.documents.length === 0 &&
        self.cmd.tailable &&
        Long.ZERO.equals(self.cursorState.cursorId)
      ) {
        // No more documents in the tailed cursor
        return handleCallback(
          callback,
          new MongoError({
            message: 'No more documents in tailed cursor',
            tailable: self.cmd.tailable,
            awaitData: self.cmd.awaitData
          })
        );
      } else if (
        self.cursorState.documents.length === 0 &&
        self.cmd.tailable &&
        !Long.ZERO.equals(self.cursorState.cursorId)
      ) {
        // Empty batch on a live tailable cursor: poll again.
        return nextFunction(self, callback);
      }

      if (self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
        return setCursorDeadAndNotified(self, callback);
      }

      nextFunction(self, callback);
    });
  } else if (
    self.cursorState.documents.length === self.cursorState.cursorIndex &&
    self.cmd.tailable &&
    Long.ZERO.equals(self.cursorState.cursorId)
  ) {
    return handleCallback(
      callback,
      new MongoError({
        message: 'No more documents in tailed cursor',
        tailable: self.cmd.tailable,
        awaitData: self.cmd.awaitData
      })
    );
  } else if (
    self.cursorState.documents.length === self.cursorState.cursorIndex &&
    Long.ZERO.equals(self.cursorState.cursorId)
  ) {
    // Buffer drained and server cursor closed: the cursor is exhausted.
    setCursorDeadAndNotified(self, callback);
  } else {
    if (self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
      // Ensure we kill the cursor on the server
      self.kill(() =>
        // Set cursor in dead and notified state
        setCursorDeadAndNotified(self, callback)
      );

      return;
    }

    // Increment the current cursor limit
    self.cursorState.currentLimit += 1;

    // Get the document
    let doc = self.cursorState.documents[self.cursorState.cursorIndex++];

    // Doc overflow
    if (!doc || doc.$err) {
      // Ensure we kill the cursor on the server
      self.kill(() =>
        // Set cursor in dead and notified state
        setCursorDeadAndNotified(self, function() {
          handleCallback(callback, new MongoError(doc ? doc.$err : undefined));
        })
      );

      return;
    }

    // Transform the doc with passed in transformation method if provided
    if (self.cursorState.transforms && typeof self.cursorState.transforms.doc === 'function') {
      doc = self.cursorState.transforms.doc(doc);
    }

    // Return the document
    handleCallback(callback, null, doc);
  }
}
|
||||
|
||||
// Public API of the core cursor module.
module.exports = {
  CursorState,
  CoreCursor
};
|
||||
351
node_modules/mongodb/lib/core/error.js
generated
vendored
Normal file
351
node_modules/mongodb/lib/core/error.js
generated
vendored
Normal file
@@ -0,0 +1,351 @@
|
||||
'use strict';
|
||||
|
||||
// Private key for the error-label set so labels do not collide with
// properties copied off server error documents.
const kErrorLabels = Symbol('errorLabels');

/**
 * Creates a new MongoError
 *
 * @augments Error
 * @param {Error|string|object} message The error message, an Error to wrap,
 *     or a server error document (`errmsg`/`$err`/`code`/`errorLabels`, ...)
 * @property {string} message The error message
 * @property {string} stack The error call stack
 */
class MongoError extends Error {
  constructor(message) {
    if (message instanceof Error) {
      // Wrap an existing error, preserving its original stack trace.
      super(message.message);
      this.stack = message.stack;
    } else {
      if (typeof message === 'string') {
        super(message);
      } else {
        // A server error document: derive the message from the usual fields.
        super(message.message || message.errmsg || message.$err || 'n/a');
        if (message.errorLabels) {
          this[kErrorLabels] = new Set(message.errorLabels);
        }

        // Copy remaining fields (code, codeName, ...) onto the error;
        // `errorLabels` is stored privately above and `errmsg` is exposed
        // through the getter below. (was `var name`; block-scoped `const`
        // avoids a function-scoped loop variable)
        for (const name in message) {
          if (name === 'errorLabels' || name === 'errmsg') {
            continue;
          }

          this[name] = message[name];
        }
      }

      Error.captureStackTrace(this, this.constructor);
    }

    this.name = 'MongoError';
  }

  /**
   * Legacy name for server error responses
   */
  get errmsg() {
    return this.message;
  }

  /**
   * Creates a new MongoError object
   *
   * @param {Error|string|object} options The options used to create the error.
   * @return {MongoError} A MongoError instance
   * @deprecated Use `new MongoError()` instead.
   */
  static create(options) {
    return new MongoError(options);
  }

  /**
   * Checks the error to see if it has an error label
   * @param {string} label The error label to check for
   * @returns {boolean} returns true if the error has the provided error label
   */
  hasErrorLabel(label) {
    if (this[kErrorLabels] == null) {
      return false;
    }

    return this[kErrorLabels].has(label);
  }

  /**
   * Adds an error label to the error, creating the label set lazily.
   * @param {string} label The error label to add
   */
  addErrorLabel(label) {
    if (this[kErrorLabels] == null) {
      this[kErrorLabels] = new Set();
    }

    this[kErrorLabels].add(label);
  }

  /**
   * All error labels currently attached to this error.
   * @returns {string[]} a copy of the label set (empty when none)
   */
  get errorLabels() {
    return this[kErrorLabels] ? Array.from(this[kErrorLabels]) : [];
  }
}
|
||||
|
||||
// Private key used by MongoNetworkError to flag errors that happened before
// the connection handshake completed.
const kBeforeHandshake = Symbol('beforeHandshake');

/**
 * Checks whether a network error was explicitly flagged as occurring before
 * the connection handshake completed.
 *
 * @param {Error} err the error to inspect
 * @returns {boolean} true only when the pre-handshake flag was set
 */
function isNetworkErrorBeforeHandshake(err) {
  return err[kBeforeHandshake] === true;
}
|
||||
|
||||
/**
 * An error indicating an issue with the network, including TCP
 * errors and timeouts.
 *
 * @param {Error|string|object} message The error message
 * @param {object} [options] `beforeHandshake: true` flags the error as pre-handshake
 * @property {string} message The error message
 * @property {string} stack The error call stack
 * @extends MongoError
 */
class MongoNetworkError extends MongoError {
  constructor(message, options) {
    super(message);
    this.name = 'MongoNetworkError';

    // Recorded under a private symbol; read back via isNetworkErrorBeforeHandshake.
    if (options && options.beforeHandshake === true) {
      this[kBeforeHandshake] = true;
    }
  }
}
|
||||
|
||||
/**
 * An error indicating a network timeout occurred
 *
 * @param {Error|string|object} message The error message
 * @param {object} [options] forwarded to MongoNetworkError
 * @property {string} message The error message
 * @property {object} [options.beforeHandshake] Indicates the timeout happened before a connection handshake completed
 * @extends MongoNetworkError
 */
class MongoNetworkTimeoutError extends MongoNetworkError {
  constructor(message, options) {
    super(message, options);
    this.name = 'MongoNetworkTimeoutError';
  }
}
|
||||
|
||||
/**
 * An error used when attempting to parse a value (like a connection string)
 *
 * @param {Error|string|object} message The error message
 * @property {string} message The error message
 * @extends MongoError
 */
class MongoParseError extends MongoError {
  constructor(message) {
    super(message);
    this.name = 'MongoParseError';
  }
}
|
||||
|
||||
/**
 * An error signifying a client-side timeout event
 *
 * @param {Error|string|object} message The error message
 * @param {string|object} [reason] The reason the timeout occured
 * @property {string} message The error message
 * @property {string} [reason] An optional reason context for the timeout, generally an error saved during flow of monitoring and selecting servers
 * @extends MongoError
 */
class MongoTimeoutError extends MongoError {
  constructor(message, reason) {
    // Prefer the wrapped error's message when the reason carries one, so the
    // timeout surfaces the root cause rather than a generic message.
    if (reason && reason.error) {
      super(reason.error.message || reason.error);
    } else {
      super(message);
    }

    this.name = 'MongoTimeoutError';
    if (reason) {
      this.reason = reason;
    }
  }
}
|
||||
|
||||
/**
 * An error signifying a client-side server selection error
 *
 * @param {Error|string|object} message The error message
 * @param {string|object} [reason] The reason the timeout occured
 * @property {string} message The error message
 * @property {string} [reason] An optional reason context for the timeout, generally an error saved during flow of monitoring and selecting servers
 * @extends MongoTimeoutError
 */
class MongoServerSelectionError extends MongoTimeoutError {
  constructor(message, reason) {
    super(message, reason);
    this.name = 'MongoServerSelectionError';
  }
}
|
||||
|
||||
/**
 * Builds the result object attached to a MongoWriteConcernError.
 *
 * @param {object} input the raw server result document
 * @returns {object} a shallow copy of `input`; when `ok` is 0 it is normalized
 *     to 1 and the top-level `errmsg`/`code`/`codeName` fields are stripped
 */
function makeWriteConcernResultObject(input) {
  // Shallow-copy so the caller's document is never mutated.
  const result = Object.assign({}, input);

  // A write concern failure still means the write itself was applied, so
  // normalize `ok` and drop the top-level error fields.
  if (result.ok === 0) {
    result.ok = 1;
    delete result.errmsg;
    delete result.code;
    delete result.codeName;
  }

  return result;
}
|
||||
|
||||
/**
 * An error thrown when the server reports a writeConcernError
 *
 * @param {Error|string|object} message The error message
 * @param {object} result The result document (provided if ok: 1)
 * @property {string} message The error message
 * @property {object} [result] The result document (provided if ok: 1)
 * @extends MongoError
 */
class MongoWriteConcernError extends MongoError {
  constructor(message, result) {
    super(message);
    this.name = 'MongoWriteConcernError';

    // Error labels may arrive on the server result document rather than on
    // the message itself.
    if (result && Array.isArray(result.errorLabels)) {
      this[kErrorLabels] = new Set(result.errorLabels);
    }

    if (result != null) {
      this.result = makeWriteConcernResultObject(result);
    }
  }
}
|
||||
|
||||
// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms
// Server error codes considered retryable for reads (used by isRetryableError).
const RETRYABLE_ERROR_CODES = new Set([
  6, // HostUnreachable
  7, // HostNotFound
  89, // NetworkTimeout
  91, // ShutdownInProgress
  189, // PrimarySteppedDown
  9001, // SocketException
  10107, // NotMaster
  11600, // InterruptedAtShutdown
  11602, // InterruptedDueToReplStateChange
  13435, // NotMasterNoSlaveOk
  13436 // NotMasterOrSecondary
]);

// Server error codes considered retryable for writes (used by isRetryableWriteError).
const RETRYABLE_WRITE_ERROR_CODES = new Set([
  11600, // InterruptedAtShutdown
  11602, // InterruptedDueToReplStateChange
  10107, // NotMaster
  13435, // NotMasterNoSlaveOk
  13436, // NotMasterOrSecondary
  189, // PrimarySteppedDown
  91, // ShutdownInProgress
  7, // HostNotFound
  6, // HostUnreachable
  89, // NetworkTimeout
  9001, // SocketException
  262 // ExceededTimeLimit
]);
|
||||
|
||||
/**
 * Determines whether an error is retryable for the purposes of retryable writes.
 *
 * @param {MongoError} error the error to classify
 * @returns {boolean} true when the error code is in the retryable-write list
 */
function isRetryableWriteError(error) {
  if (error instanceof MongoWriteConcernError) {
    // A write concern error may carry the code either on the error itself or
    // on the raw result document it wraps.
    return (
      RETRYABLE_WRITE_ERROR_CODES.has(error.code) ||
      RETRYABLE_WRITE_ERROR_CODES.has(error.result.code)
    );
  }

  return RETRYABLE_WRITE_ERROR_CODES.has(error.code);
}
|
||||
|
||||
/**
 * Determines whether an error is something the driver should attempt to retry
 *
 * @ignore
 * @param {MongoError|Error} error
 * @returns {boolean|Array} truthy when the error code, type, or message
 *     ("not master" / "node is recovering") marks it retryable
 */
function isRetryableError(error) {
  return (
    RETRYABLE_ERROR_CODES.has(error.code) ||
    error instanceof MongoNetworkError ||
    // Message sniffing covers servers that do not report an error code.
    error.message.match(/not master/) ||
    error.message.match(/node is recovering/)
  );
}
|
||||
|
||||
// Server error codes that indicate a node is in a "recovering" state.
const SDAM_RECOVERING_CODES = new Set([
  91, // ShutdownInProgress
  189, // PrimarySteppedDown
  11600, // InterruptedAtShutdown
  11602, // InterruptedDueToReplStateChange
  13436 // NotMasterOrSecondary
]);

// Server error codes that indicate the node is no longer a primary.
const SDAM_NOTMASTER_CODES = new Set([
  10107, // NotMaster
  13435 // NotMasterNoSlaveOk
]);

// Server error codes that indicate the node is shutting down.
const SDAM_NODE_SHUTTING_DOWN_ERROR_CODES = new Set([
  11600, // InterruptedAtShutdown
  91 // ShutdownInProgress
]);

/**
 * Checks whether a server error indicates a node in the "recovering" state.
 *
 * @param {MongoError|Error} error the error to classify
 * @returns {boolean|Array} truthy when the code or message indicates recovery
 */
function isRecoveringError(error) {
  const code = error.code;
  if (code && SDAM_RECOVERING_CODES.has(code)) {
    return true;
  }

  // Fall back to message sniffing when no error code was reported.
  return error.message.match(/not master or secondary/) || error.message.match(/node is recovering/);
}

/**
 * Checks whether a server error indicates a "not master" condition that is
 * not merely the recovering state.
 *
 * @param {MongoError|Error} error the error to classify
 * @returns {boolean|Array} truthy when the error means the node is not primary
 */
function isNotMasterError(error) {
  const code = error.code;
  if (code && SDAM_NOTMASTER_CODES.has(code)) {
    return true;
  }

  // "not master or secondary" style messages belong to the recovering class.
  if (isRecoveringError(error)) {
    return false;
  }

  return error.message.match(/not master/);
}

/**
 * Checks whether a server error indicates the node is shutting down.
 *
 * @param {MongoError|Error} error the error to classify
 * @returns {boolean} true when the error code marks a shutdown
 */
function isNodeShuttingDownError(error) {
  return error.code && SDAM_NODE_SHUTTING_DOWN_ERROR_CODES.has(error.code);
}
|
||||
|
||||
/**
 * Determines whether SDAM can recover from a given error. If it cannot
 * then the pool will be cleared, and server state will completely reset
 * locally.
 *
 * @ignore
 * @see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-master-and-node-is-recovering
 * @param {MongoError|Error} error
 * @returns {boolean} true when the error requires a full local server-state reset
 */
function isSDAMUnrecoverableError(error) {
  // NOTE: null check is here for a strictly pre-CMAP world, a timeout or
  // close event are considered unrecoverable
  if (error instanceof MongoParseError || error == null) {
    return true;
  }

  if (isRecoveringError(error) || isNotMasterError(error)) {
    return true;
  }

  return false;
}
|
||||
|
||||
// Public error types and error-classification helpers for the core driver.
module.exports = {
  MongoError,
  MongoNetworkError,
  MongoNetworkTimeoutError,
  MongoParseError,
  MongoTimeoutError,
  MongoServerSelectionError,
  MongoWriteConcernError,
  isRetryableError,
  isSDAMUnrecoverableError,
  isNodeShuttingDownError,
  isRetryableWriteError,
  isNetworkErrorBeforeHandshake
};
|
||||
50
node_modules/mongodb/lib/core/index.js
generated
vendored
Normal file
50
node_modules/mongodb/lib/core/index.js
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
'use strict';

// Aggregates the core driver's public surface: errors, topologies,
// connection primitives, auth mechanisms, and BSON/EJSON parsers.
let BSON = require('bson');
const require_optional = require('require_optional');
const EJSON = require('./utils').retrieveEJSON();

try {
  // Attempt to grab the native BSON parser
  const BSONNative = require_optional('bson-ext');
  // If we got the native parser, use it instead of the
  // Javascript one
  if (BSONNative) {
    BSON = BSONNative;
  }
} catch (err) {} // eslint-disable-line

module.exports = {
  // Errors
  MongoError: require('./error').MongoError,
  MongoNetworkError: require('./error').MongoNetworkError,
  MongoParseError: require('./error').MongoParseError,
  MongoTimeoutError: require('./error').MongoTimeoutError,
  MongoServerSelectionError: require('./error').MongoServerSelectionError,
  MongoWriteConcernError: require('./error').MongoWriteConcernError,
  // Core
  Connection: require('./connection/connection'),
  Server: require('./topologies/server'),
  ReplSet: require('./topologies/replset'),
  Mongos: require('./topologies/mongos'),
  Logger: require('./connection/logger'),
  Cursor: require('./cursor').CoreCursor,
  ReadPreference: require('./topologies/read_preference'),
  Sessions: require('./sessions'),
  BSON: BSON,
  EJSON: EJSON,
  Topology: require('./sdam/topology').Topology,
  // Raw operations
  Query: require('./connection/commands').Query,
  // Auth mechanisms
  MongoCredentials: require('./auth/mongo_credentials').MongoCredentials,
  defaultAuthProviders: require('./auth/defaultAuthProviders').defaultAuthProviders,
  MongoCR: require('./auth/mongocr'),
  X509: require('./auth/x509'),
  Plain: require('./auth/plain'),
  GSSAPI: require('./auth/gssapi'),
  ScramSHA1: require('./auth/scram').ScramSHA1,
  ScramSHA256: require('./auth/scram').ScramSHA256,
  // Utilities
  parseConnectionString: require('./uri_parser')
};
|
||||
67
node_modules/mongodb/lib/core/sdam/common.js
generated
vendored
Normal file
67
node_modules/mongodb/lib/core/sdam/common.js
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
'use strict';
|
||||
|
||||
// shared state names
|
||||
const STATE_CLOSING = 'closing';
|
||||
const STATE_CLOSED = 'closed';
|
||||
const STATE_CONNECTING = 'connecting';
|
||||
const STATE_CONNECTED = 'connected';
|
||||
|
||||
// An enumeration of topology types we know about
const TopologyType = {
  Single: 'Single',
  ReplicaSetNoPrimary: 'ReplicaSetNoPrimary',
  ReplicaSetWithPrimary: 'ReplicaSetWithPrimary',
  Sharded: 'Sharded',
  Unknown: 'Unknown'
};

// An enumeration of server types we know about
const ServerType = {
  Standalone: 'Standalone',
  Mongos: 'Mongos',
  PossiblePrimary: 'PossiblePrimary',
  RSPrimary: 'RSPrimary',
  RSSecondary: 'RSSecondary',
  RSArbiter: 'RSArbiter',
  RSOther: 'RSOther',
  RSGhost: 'RSGhost',
  Unknown: 'Unknown'
};

/**
 * Resolve a server's type for both legacy and unified topologies.
 *
 * Unified topologies store a description under `server.s.description`, legacy
 * ones under `server.s.serverDescription`; a `Single` topology description
 * reports the type of its only member server.
 *
 * @param {object} server A server or single-server topology instance
 * @returns {string} One of the `ServerType` values
 */
function serverType(server) {
  const description = server.s.description || server.s.serverDescription;
  if (description.topologyType === TopologyType.Single) {
    return description.servers[0].type;
  }

  return description.type;
}
|
||||
|
||||
// Default options applied when constructing a unified Topology.
// NOTE(review): values appear to follow the SDAM/server-selection spec
// recommended defaults (30s selection timeout, 10s heartbeat) — confirm
// against the spec before changing.
const TOPOLOGY_DEFAULTS = {
  useUnifiedTopology: true,
  localThresholdMS: 15,
  serverSelectionTimeoutMS: 30000,
  heartbeatFrequencyMS: 10000,
  minHeartbeatFrequencyMS: 500
};
|
||||
|
||||
/**
 * Cancel every pending timer in the queue and empty it.
 *
 * @param {Set} queue Set of timer handles created with `setTimeout`
 */
function drainTimerQueue(queue) {
  for (const timer of queue) {
    clearTimeout(timer);
  }

  queue.clear();
}
|
||||
|
||||
/**
 * Cancel a single timer and remove it from a tracking set.
 *
 * @param {*} timer A timer handle created with `setTimeout`
 * @param {Set} timers The set the handle is tracked in
 * @returns {boolean} true if the handle was present in the set
 */
function clearAndRemoveTimerFrom(timer, timers) {
  clearTimeout(timer);
  const wasTracked = timers.delete(timer);
  return wasTracked;
}
|
||||
|
||||
// Public surface of the shared SDAM helpers (states, enums, defaults, timer utils).
module.exports = {
  STATE_CLOSING,
  STATE_CLOSED,
  STATE_CONNECTING,
  STATE_CONNECTED,
  TOPOLOGY_DEFAULTS,
  TopologyType,
  ServerType,
  serverType,
  drainTimerQueue,
  clearAndRemoveTimerFrom
};
|
||||
124
node_modules/mongodb/lib/core/sdam/events.js
generated
vendored
Normal file
124
node_modules/mongodb/lib/core/sdam/events.js
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
'use strict';
|
||||
|
||||
/**
 * Published when a server's description changes (RTT-only changes excluded).
 *
 * @property {Object} topologyId A unique identifier for the topology
 * @property {ServerAddress} address The address (host/port pair) of the server
 * @property {ServerDescription} previousDescription The previous server description
 * @property {ServerDescription} newDescription The new server description
 */
class ServerDescriptionChangedEvent {
  constructor(topologyId, address, previousDescription, newDescription) {
    this.topologyId = topologyId;
    this.address = address;
    this.previousDescription = previousDescription;
    this.newDescription = newDescription;
  }
}

/**
 * Published when a server is initialized.
 *
 * @property {Object} topologyId A unique identifier for the topology
 * @property {ServerAddress} address The address (host/port pair) of the server
 */
class ServerOpeningEvent {
  constructor(topologyId, address) {
    this.topologyId = topologyId;
    this.address = address;
  }
}

/**
 * Published when a server is closed.
 *
 * @property {Object} topologyId A unique identifier for the topology
 * @property {ServerAddress} address The address (host/port pair) of the server
 */
class ServerClosedEvent {
  constructor(topologyId, address) {
    this.topologyId = topologyId;
    this.address = address;
  }
}
|
||||
|
||||
/**
 * Published when the topology description changes.
 *
 * @property {Object} topologyId A unique identifier for the topology
 * @property {TopologyDescription} previousDescription The old topology description
 * @property {TopologyDescription} newDescription The new topology description
 */
class TopologyDescriptionChangedEvent {
  constructor(topologyId, previousDescription, newDescription) {
    this.topologyId = topologyId;
    this.previousDescription = previousDescription;
    this.newDescription = newDescription;
  }
}

/**
 * Published when the topology is initialized.
 *
 * @property {Object} topologyId A unique identifier for the topology
 */
class TopologyOpeningEvent {
  constructor(topologyId) {
    this.topologyId = topologyId;
  }
}

/**
 * Published when the topology is closed.
 *
 * @property {Object} topologyId A unique identifier for the topology
 */
class TopologyClosedEvent {
  constructor(topologyId) {
    this.topologyId = topologyId;
  }
}
|
||||
|
||||
/**
 * Fired when the server monitor's ismaster command starts — immediately before
 * the command is serialized into raw BSON and written to the socket.
 *
 * @property {Object} connectionId The connection id for the command
 */
class ServerHeartbeatStartedEvent {
  constructor(connectionId) {
    this.connectionId = connectionId;
  }
}

/**
 * Fired when the server monitor's ismaster command succeeds.
 *
 * @property {Object} connectionId The connection id for the command
 * @property {Number} duration The execution time of the event in ms
 * @property {Object} reply The command reply
 */
class ServerHeartbeatSucceededEvent {
  constructor(duration, reply, connectionId) {
    this.connectionId = connectionId;
    this.duration = duration;
    this.reply = reply;
  }
}

/**
 * Fired when the server monitor's ismaster fails, either with an "ok: 0"
 * response or a socket exception.
 *
 * @property {Object} connectionId The connection id for the command
 * @property {Number} duration The execution time of the event in ms
 * @property {MongoError|Object} failure The command failure
 */
class ServerHeartbeatFailedEvent {
  constructor(duration, failure, connectionId) {
    this.connectionId = connectionId;
    this.duration = duration;
    this.failure = failure;
  }
}
|
||||
|
||||
// SDAM monitoring events published by Topology/Server/Monitor instances.
module.exports = {
  ServerDescriptionChangedEvent,
  ServerOpeningEvent,
  ServerClosedEvent,
  TopologyDescriptionChangedEvent,
  TopologyOpeningEvent,
  TopologyClosedEvent,
  ServerHeartbeatStartedEvent,
  ServerHeartbeatSucceededEvent,
  ServerHeartbeatFailedEvent
};
|
||||
405
node_modules/mongodb/lib/core/sdam/monitor.js
generated
vendored
Normal file
405
node_modules/mongodb/lib/core/sdam/monitor.js
generated
vendored
Normal file
@@ -0,0 +1,405 @@
|
||||
'use strict';
|
||||
|
||||
const ServerType = require('./common').ServerType;
|
||||
const EventEmitter = require('events');
|
||||
const connect = require('../connection/connect');
|
||||
const Connection = require('../../cmap/connection').Connection;
|
||||
const common = require('./common');
|
||||
const makeStateMachine = require('../utils').makeStateMachine;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const BSON = require('../connection/utils').retrieveBSON();
|
||||
const makeInterruptableAsyncInterval = require('../../utils').makeInterruptableAsyncInterval;
|
||||
const calculateDurationInMs = require('../../utils').calculateDurationInMs;
|
||||
const now = require('../../utils').now;
|
||||
|
||||
const sdamEvents = require('./events');
|
||||
const ServerHeartbeatStartedEvent = sdamEvents.ServerHeartbeatStartedEvent;
|
||||
const ServerHeartbeatSucceededEvent = sdamEvents.ServerHeartbeatSucceededEvent;
|
||||
const ServerHeartbeatFailedEvent = sdamEvents.ServerHeartbeatFailedEvent;
|
||||
|
||||
// Symbol keys used to keep monitor internals off the public surface.
const kServer = Symbol('server');
const kMonitorId = Symbol('monitorId');
const kConnection = Symbol('connection');
const kCancellationToken = Symbol('cancellationToken');
const kRTTPinger = Symbol('rttPinger');
const kRoundTripTime = Symbol('roundTripTime');

// Monitor lifecycle states: closed/closing are the shared SDAM states,
// idle/monitoring are specific to the monitor check loop.
const STATE_CLOSED = common.STATE_CLOSED;
const STATE_CLOSING = common.STATE_CLOSING;
const STATE_IDLE = 'idle';
const STATE_MONITORING = 'monitoring';
// Legal state transitions for a Monitor (current state -> allowed next states).
const stateTransition = makeStateMachine({
  [STATE_CLOSING]: [STATE_CLOSING, STATE_IDLE, STATE_CLOSED],
  [STATE_CLOSED]: [STATE_CLOSED, STATE_MONITORING],
  [STATE_IDLE]: [STATE_IDLE, STATE_MONITORING, STATE_CLOSING],
  [STATE_MONITORING]: [STATE_MONITORING, STATE_IDLE, STATE_CLOSING]
});

// States in which an explicit `requestCheck` is a no-op
// (a check is already running, or the monitor is shut down).
const INVALID_REQUEST_CHECK_STATES = new Set([STATE_CLOSING, STATE_CLOSED, STATE_MONITORING]);
|
||||
|
||||
/**
 * Check whether a monitor is shutting down or already shut down.
 *
 * @param {Monitor} monitor The monitor to inspect
 * @returns {boolean} true when the monitor state is `closed` or `closing`
 */
function isInCloseState(monitor) {
  const state = monitor.s.state;
  return state === STATE_CLOSED || state === STATE_CLOSING;
}
|
||||
|
||||
/**
 * Periodically checks a single server with `ismaster` and publishes heartbeat
 * events. Owns its own dedicated, unauthenticated connection, separate from
 * the server's connection pool.
 */
class Monitor extends EventEmitter {
  /**
   * @param {Server} server The server to monitor
   * @param {object} options Timeout/heartbeat options (see below)
   */
  constructor(server, options) {
    super(options);

    this[kServer] = server;
    this[kConnection] = undefined;
    // used to abort in-flight connects/commands when the monitor resets or closes
    this[kCancellationToken] = new EventEmitter();
    this[kCancellationToken].setMaxListeners(Infinity);
    this[kMonitorId] = null;
    this.s = {
      state: STATE_CLOSED
    };

    this.address = server.description.address;
    // legacy `connectionTimeout` takes precedence over `connectTimeoutMS`
    this.options = Object.freeze({
      connectTimeoutMS:
        typeof options.connectionTimeout === 'number'
          ? options.connectionTimeout
          : typeof options.connectTimeoutMS === 'number'
          ? options.connectTimeoutMS
          : 10000,
      heartbeatFrequencyMS:
        typeof options.heartbeatFrequencyMS === 'number' ? options.heartbeatFrequencyMS : 10000,
      minHeartbeatFrequencyMS:
        typeof options.minHeartbeatFrequencyMS === 'number' ? options.minHeartbeatFrequencyMS : 500
    });

    // TODO: refactor this to pull it directly from the pool, requires new ConnectionPool integration
    const connectOptions = Object.assign(
      {
        id: '<monitor>',
        host: server.description.host,
        port: server.description.port,
        bson: server.s.bson,
        connectionType: Connection
      },
      server.s.options,
      this.options,

      // force BSON serialization options
      {
        raw: false,
        promoteLongs: true,
        promoteValues: true,
        promoteBuffers: true
      }
    );

    // ensure no authentication is used for monitoring
    delete connectOptions.credentials;
    this.connectOptions = Object.freeze(connectOptions);
  }

  /**
   * Start the periodic check loop. No-op unless the monitor is closed.
   * NOTE(review): the state stays `closed` here — the first scheduled check
   * transitions it to `monitoring` (the state machine allows closed -> monitoring).
   */
  connect() {
    if (this.s.state !== STATE_CLOSED) {
      return;
    }

    // start
    const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS;
    const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS;
    this[kMonitorId] = makeInterruptableAsyncInterval(monitorServer(this), {
      interval: heartbeatFrequencyMS,
      minInterval: minHeartbeatFrequencyMS,
      immediate: true
    });
  }

  /**
   * Ask for an immediate check. No-op while a check is running or the
   * monitor is shut(ting) down.
   */
  requestCheck() {
    if (INVALID_REQUEST_CHECK_STATES.has(this.s.state)) {
      return;
    }

    this[kMonitorId].wake();
  }

  /**
   * Tear down the current connection/timers and restart the check loop
   * (without the `immediate` flag used by `connect`).
   */
  reset() {
    if (isInCloseState(this)) {
      return;
    }

    stateTransition(this, STATE_CLOSING);
    resetMonitorState(this);

    // restart monitor
    stateTransition(this, STATE_IDLE);

    // restart monitoring
    const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS;
    const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS;
    this[kMonitorId] = makeInterruptableAsyncInterval(monitorServer(this), {
      interval: heartbeatFrequencyMS,
      minInterval: minHeartbeatFrequencyMS
    });
  }

  /**
   * Permanently stop monitoring: cancel timers and in-flight work, destroy
   * the dedicated connection, emit 'close', and move to the closed state.
   */
  close() {
    if (isInCloseState(this)) {
      return;
    }

    stateTransition(this, STATE_CLOSING);
    resetMonitorState(this);

    // close monitor
    this.emit('close');
    stateTransition(this, STATE_CLOSED);
  }
}
|
||||
|
||||
/**
 * Tear down a monitor's transient state: stop the check interval, close the
 * RTT pinger, cancel in-flight connects/commands, and destroy the dedicated
 * monitoring connection. Leaves the monitor in the `closing` state; callers
 * (`reset`/`close`) decide the next state.
 *
 * Fix: removed a dead second `if (monitor[kMonitorId]) { clearTimeout(...) }`
 * branch — the first branch always stops the interval and nulls the id, so
 * the later check could never be true.
 *
 * @param {Monitor} monitor The monitor to reset
 */
function resetMonitorState(monitor) {
  stateTransition(monitor, STATE_CLOSING);
  if (monitor[kMonitorId]) {
    monitor[kMonitorId].stop();
    monitor[kMonitorId] = null;
  }

  if (monitor[kRTTPinger]) {
    monitor[kRTTPinger].close();
    monitor[kRTTPinger] = undefined;
  }

  // abort any connect/command currently in flight on the monitoring connection
  monitor[kCancellationToken].emit('cancel');

  if (monitor[kConnection]) {
    monitor[kConnection].destroy({ force: true });
  }
}
|
||||
|
||||
/**
 * Perform a single server check. If a healthy monitoring connection exists it
 * issues `ismaster` on it (using the streaming/awaitable protocol when the
 * server advertised a `topologyVersion`); otherwise it (re)connects, which
 * performs an implicit `ismaster` during the handshake.
 *
 * @param {Monitor} monitor
 * @param {function} callback Invoked with (err) on failure or (undefined, isMaster) on success
 */
function checkServer(monitor, callback) {
  let start = now();
  // NOTE(review): the event's `connectionId` field carries the monitored address here
  monitor.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(monitor.address));

  // Shared failure path: drop the monitoring connection, publish the failed
  // heartbeat, and ask the owner to reset the server description and pool.
  function failureHandler(err) {
    if (monitor[kConnection]) {
      monitor[kConnection].destroy({ force: true });
      monitor[kConnection] = undefined;
    }

    monitor.emit(
      'serverHeartbeatFailed',
      new ServerHeartbeatFailedEvent(calculateDurationInMs(start), err, monitor.address)
    );

    monitor.emit('resetServer', err);
    monitor.emit('resetConnectionPool');
    callback(err);
  }

  if (monitor[kConnection] != null && !monitor[kConnection].closed) {
    const connectTimeoutMS = monitor.options.connectTimeoutMS;
    const maxAwaitTimeMS = monitor.options.heartbeatFrequencyMS;
    // a known topologyVersion means the server supports the streaming protocol
    const topologyVersion = monitor[kServer].description.topologyVersion;
    const isAwaitable = topologyVersion != null;

    const cmd = isAwaitable
      ? { ismaster: true, maxAwaitTimeMS, topologyVersion: makeTopologyVersion(topologyVersion) }
      : { ismaster: true };

    // awaitable checks may legitimately block up to maxAwaitTimeMS, so the
    // socket timeout must cover both the await window and the connect timeout
    const options = isAwaitable
      ? { socketTimeout: connectTimeoutMS + maxAwaitTimeMS, exhaustAllowed: true }
      : { socketTimeout: connectTimeoutMS };

    // streaming mode ties up this connection, so RTT is sampled on a separate pinger
    if (isAwaitable && monitor[kRTTPinger] == null) {
      monitor[kRTTPinger] = new RTTPinger(monitor[kCancellationToken], monitor.connectOptions);
    }

    monitor[kConnection].command('admin.$cmd', cmd, options, (err, result) => {
      if (err) {
        failureHandler(err);
        return;
      }

      const isMaster = result.result;
      const duration = isAwaitable
        ? monitor[kRTTPinger].roundTripTime
        : calculateDurationInMs(start);

      monitor.emit(
        'serverHeartbeatSucceeded',
        new ServerHeartbeatSucceededEvent(duration, isMaster, monitor.address)
      );

      // if we are using the streaming protocol then we immediately issue another `started`
      // event, otherwise the "check" is complete and return to the main monitor loop
      if (isAwaitable && isMaster.topologyVersion) {
        monitor.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(monitor.address));
        start = now();
      } else {
        if (monitor[kRTTPinger]) {
          monitor[kRTTPinger].close();
          monitor[kRTTPinger] = undefined;
        }

        callback(undefined, isMaster);
      }
    });

    return;
  }

  // connecting does an implicit `ismaster`
  connect(monitor.connectOptions, monitor[kCancellationToken], (err, conn) => {
    // the monitor was closed while connecting: discard the connection silently
    if (conn && isInCloseState(monitor)) {
      conn.destroy({ force: true });
      return;
    }

    if (err) {
      monitor[kConnection] = undefined;

      // we already reset the connection pool on network errors in all cases
      if (!(err instanceof MongoNetworkError)) {
        monitor.emit('resetConnectionPool');
      }

      failureHandler(err);
      return;
    }

    monitor[kConnection] = conn;
    monitor.emit(
      'serverHeartbeatSucceeded',
      new ServerHeartbeatSucceededEvent(
        calculateDurationInMs(start),
        conn.ismaster,
        monitor.address
      )
    );

    callback(undefined, conn.ismaster);
  });
}
|
||||
|
||||
/**
 * Build the function executed by the monitor's async interval: run one server
 * check, then return the monitor to `idle` (unless it is shutting down).
 *
 * @param {Monitor} monitor
 * @returns {function} interval body taking a completion callback
 */
function monitorServer(monitor) {
  return callback => {
    stateTransition(monitor, STATE_MONITORING);
    function done() {
      if (!isInCloseState(monitor)) {
        stateTransition(monitor, STATE_IDLE);
      }

      callback();
    }

    // TODO: the next line is a legacy event, remove in v4
    process.nextTick(() => monitor.emit('monitoring', monitor[kServer]));

    checkServer(monitor, (err, isMaster) => {
      if (err) {
        // an error occurred on initial discovery, so bail
        // NOTE(review): checkServer's failure path already emitted 'resetServer';
        // this appears to emit it a second time — confirm whether intentional
        if (monitor[kServer].description.type === ServerType.Unknown) {
          monitor.emit('resetServer', err);
          return done();
        }
      }

      // if the check indicates streaming is supported, immediately reschedule monitoring
      if (isMaster && isMaster.topologyVersion) {
        setTimeout(() => {
          if (!isInCloseState(monitor)) {
            monitor[kMonitorId].wake();
          }
        });
      }

      done();
    });
  };
}
|
||||
|
||||
/**
 * Convert a cached topologyVersion into the wire form expected by an
 * awaitable `ismaster`, with the counter encoded as a BSON Long.
 *
 * @param {object} tv Topology version with `processId` and numeric `counter`
 * @returns {object} `{ processId, counter }` with a Long counter
 */
function makeTopologyVersion(tv) {
  const counter = BSON.Long.fromNumber(tv.counter);
  return { processId: tv.processId, counter };
}
|
||||
|
||||
/**
 * Samples round-trip time on a dedicated connection while the monitor's main
 * connection is tied up by the streaming (awaitable ismaster) protocol.
 */
class RTTPinger {
  /**
   * @param {EventEmitter} cancellationToken Token used to abort in-flight connects
   * @param {object} options Connect options; `heartbeatFrequencyMS` sets the sample period
   */
  constructor(cancellationToken, options) {
    this[kConnection] = null;
    this[kCancellationToken] = cancellationToken;
    this[kRoundTripTime] = 0;
    this.closed = false;

    const sampleIntervalMS = options.heartbeatFrequencyMS;
    this[kMonitorId] = setTimeout(() => measureRoundTripTime(this, options), sampleIntervalMS);
  }

  /** The most recent round-trip time sample, in milliseconds. */
  get roundTripTime() {
    return this[kRoundTripTime];
  }

  /** Stop sampling and destroy the pinger's connection. */
  close() {
    this.closed = true;

    clearTimeout(this[kMonitorId]);
    this[kMonitorId] = undefined;

    const conn = this[kConnection];
    if (conn) {
      conn.destroy({ force: true });
    }
  }
}
|
||||
|
||||
/**
 * Take one round-trip-time sample for an RTTPinger and schedule the next one.
 * Connects lazily on the first sample; afterwards reuses the pinger's
 * connection and issues a plain `ismaster`.
 *
 * Fix: the success callback of the in-flight `ismaster` calls
 * `measureAndReschedule()` with no connection argument; if the pinger was
 * closed while the command was in flight, `conn.destroy(...)` threw
 * `TypeError: Cannot read property 'destroy' of undefined`. The destroy is
 * now guarded — when there is no `conn`, `RTTPinger#close` has already
 * destroyed the stored connection.
 *
 * @param {RTTPinger} rttPinger
 * @param {object} options Connect options; `heartbeatFrequencyMS` sets the sample period
 */
function measureRoundTripTime(rttPinger, options) {
  const start = now();
  const cancellationToken = rttPinger[kCancellationToken];
  const heartbeatFrequencyMS = options.heartbeatFrequencyMS;
  if (rttPinger.closed) {
    return;
  }

  function measureAndReschedule(conn) {
    if (rttPinger.closed) {
      // guard: `conn` is undefined on the command (non-connect) path
      if (conn) {
        conn.destroy({ force: true });
      }

      return;
    }

    if (rttPinger[kConnection] == null) {
      rttPinger[kConnection] = conn;
    }

    rttPinger[kRoundTripTime] = calculateDurationInMs(start);
    rttPinger[kMonitorId] = setTimeout(
      () => measureRoundTripTime(rttPinger, options),
      heartbeatFrequencyMS
    );
  }

  if (rttPinger[kConnection] == null) {
    // first sample: the connect handshake itself is the measured round trip
    connect(options, cancellationToken, (err, conn) => {
      if (err) {
        rttPinger[kConnection] = undefined;
        rttPinger[kRoundTripTime] = 0;
        return;
      }

      measureAndReschedule(conn);
    });

    return;
  }

  rttPinger[kConnection].command('admin.$cmd', { ismaster: 1 }, err => {
    if (err) {
      rttPinger[kConnection] = undefined;
      rttPinger[kRoundTripTime] = 0;
      return;
    }

    measureAndReschedule();
  });
}
|
||||
|
||||
// Only the Monitor is public; RTTPinger is an internal helper.
module.exports = {
  Monitor
};
|
||||
564
node_modules/mongodb/lib/core/sdam/server.js
generated
vendored
Normal file
564
node_modules/mongodb/lib/core/sdam/server.js
generated
vendored
Normal file
@@ -0,0 +1,564 @@
|
||||
'use strict';
|
||||
const EventEmitter = require('events');
|
||||
const ConnectionPool = require('../../cmap/connection_pool').ConnectionPool;
|
||||
const CMAP_EVENT_NAMES = require('../../cmap/events').CMAP_EVENT_NAMES;
|
||||
const MongoError = require('../error').MongoError;
|
||||
const relayEvents = require('../utils').relayEvents;
|
||||
const BSON = require('../connection/utils').retrieveBSON();
|
||||
const Logger = require('../connection/logger');
|
||||
const ServerDescription = require('./server_description').ServerDescription;
|
||||
const compareTopologyVersion = require('./server_description').compareTopologyVersion;
|
||||
const ReadPreference = require('../topologies/read_preference');
|
||||
const Monitor = require('./monitor').Monitor;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const MongoNetworkTimeoutError = require('../error').MongoNetworkTimeoutError;
|
||||
const collationNotSupported = require('../utils').collationNotSupported;
|
||||
const debugOptions = require('../connection/utils').debugOptions;
|
||||
const isSDAMUnrecoverableError = require('../error').isSDAMUnrecoverableError;
|
||||
const isRetryableWriteError = require('../error').isRetryableWriteError;
|
||||
const isNodeShuttingDownError = require('../error').isNodeShuttingDownError;
|
||||
const isNetworkErrorBeforeHandshake = require('../error').isNetworkErrorBeforeHandshake;
|
||||
const maxWireVersion = require('../utils').maxWireVersion;
|
||||
const makeStateMachine = require('../utils').makeStateMachine;
|
||||
const common = require('./common');
|
||||
const ServerType = common.ServerType;
|
||||
const isTransactionCommand = require('../transactions').isTransactionCommand;
|
||||
|
||||
// Used for filtering out fields for logging
// (whitelist of connection/TLS option names that are safe to echo in debug output)
const DEBUG_FIELDS = [
  'reconnect',
  'reconnectTries',
  'reconnectInterval',
  'emitError',
  'cursorFactory',
  'host',
  'port',
  'size',
  'keepAlive',
  'keepAliveInitialDelay',
  'noDelay',
  'connectionTimeout',
  'checkServerIdentity',
  'socketTimeout',
  'ssl',
  'ca',
  'crl',
  'cert',
  'key',
  'rejectUnauthorized',
  'promoteLongs',
  'promoteValues',
  'promoteBuffers',
  'servername'
];
|
||||
|
||||
// Shared SDAM lifecycle states for a Server instance.
const STATE_CLOSING = common.STATE_CLOSING;
const STATE_CLOSED = common.STATE_CLOSED;
const STATE_CONNECTING = common.STATE_CONNECTING;
const STATE_CONNECTED = common.STATE_CONNECTED;
// Legal state transitions for a Server (current state -> allowed next states).
const stateTransition = makeStateMachine({
  [STATE_CLOSED]: [STATE_CLOSED, STATE_CONNECTING],
  [STATE_CONNECTING]: [STATE_CONNECTING, STATE_CLOSING, STATE_CONNECTED, STATE_CLOSED],
  [STATE_CONNECTED]: [STATE_CONNECTED, STATE_CLOSING, STATE_CLOSED],
  [STATE_CLOSING]: [STATE_CLOSING, STATE_CLOSED]
});

// Symbol key hiding the Monitor instance from the public surface.
const kMonitor = Symbol('monitor');
|
||||
|
||||
/**
 * A single server in an SDAM topology: owns a connection pool for operations
 * and a Monitor for heartbeats, and forwards both of their events.
 *
 * @fires Server#serverHeartbeatStarted
 * @fires Server#serverHeartbeatSucceeded
 * @fires Server#serverHeartbeatFailed
 */
class Server extends EventEmitter {
  /**
   * Create a server
   *
   * @param {ServerDescription} description
   * @param {Object} options
   * @param {Topology} topology The owning topology (provides the shared clusterTime)
   */
  constructor(description, options, topology) {
    super();

    this.s = {
      // the server description
      description,
      // a saved copy of the incoming options
      options,
      // the server logger
      logger: Logger('Server', options),
      // the bson parser
      bson:
        options.bson ||
        new BSON([
          BSON.Binary,
          BSON.Code,
          BSON.DBRef,
          BSON.Decimal128,
          BSON.Double,
          BSON.Int32,
          BSON.Long,
          BSON.Map,
          BSON.MaxKey,
          BSON.MinKey,
          BSON.ObjectId,
          BSON.BSONRegExp,
          BSON.Symbol,
          BSON.Timestamp
        ]),
      // the server state
      state: STATE_CLOSED,
      credentials: options.credentials,
      topology
    };

    // create the connection pool
    // NOTE: this used to happen in `connect`, we supported overriding pool options there
    const poolOptions = Object.assign(
      { host: this.description.host, port: this.description.port, bson: this.s.bson },
      options
    );

    this.s.pool = new ConnectionPool(poolOptions);
    // forward command-monitoring and CMAP events from the pool
    relayEvents(
      this.s.pool,
      this,
      ['commandStarted', 'commandSucceeded', 'commandFailed'].concat(CMAP_EVENT_NAMES)
    );

    // keep the topology-wide logical clock up to date with pool responses
    this.s.pool.on('clusterTimeReceived', clusterTime => {
      this.clusterTime = clusterTime;
    });

    // create the monitor
    this[kMonitor] = new Monitor(this, this.s.options);
    relayEvents(this[kMonitor], this, [
      'serverHeartbeatStarted',
      'serverHeartbeatSucceeded',
      'serverHeartbeatFailed',

      // legacy events
      'monitoring'
    ]);

    this[kMonitor].on('resetConnectionPool', () => {
      this.s.pool.clear();
    });

    this[kMonitor].on('resetServer', error => markServerUnknown(this, error));
    this[kMonitor].on('serverHeartbeatSucceeded', event => {
      // publish an updated description (with a smoothed RTT) for each heartbeat
      this.emit(
        'descriptionReceived',
        new ServerDescription(this.description.address, event.reply, {
          roundTripTime: calculateRoundTripTime(this.description.roundTripTime, event.duration)
        })
      );

      // the first successful heartbeat completes the connect sequence
      if (this.s.state === STATE_CONNECTING) {
        stateTransition(this, STATE_CONNECTED);
        this.emit('connect', this);
      }
    });
  }

  // The current ServerDescription for this server.
  get description() {
    return this.s.description;
  }

  // The server's "host:port" address string.
  get name() {
    return this.s.description.address;
  }

  // The auto-encrypter, if one was configured; otherwise null.
  get autoEncrypter() {
    if (this.s.options && this.s.options.autoEncrypter) {
      return this.s.options.autoEncrypter;
    }
    return null;
  }

  /**
   * Initiate server connect
   */
  connect() {
    if (this.s.state !== STATE_CLOSED) {
      return;
    }

    // connection completes when the monitor's first heartbeat succeeds (see constructor)
    stateTransition(this, STATE_CONNECTING);
    this[kMonitor].connect();
  }

  /**
   * Destroy the server connection
   *
   * @param {object} [options] Optional settings
   * @param {Boolean} [options.force=false] Force destroy the pool
   * @param {function} [callback] Invoked (with any pool-close error) once closed
   */
  destroy(options, callback) {
    if (typeof options === 'function') (callback = options), (options = {});
    options = Object.assign({}, { force: false }, options);

    if (this.s.state === STATE_CLOSED) {
      if (typeof callback === 'function') {
        callback();
      }

      return;
    }

    stateTransition(this, STATE_CLOSING);

    this[kMonitor].close();
    this.s.pool.close(options, err => {
      stateTransition(this, STATE_CLOSED);
      this.emit('closed');
      if (typeof callback === 'function') {
        callback(err);
      }
    });
  }

  /**
   * Immediately schedule monitoring of this server. If there already an attempt being made
   * this will be a no-op.
   */
  requestCheck() {
    this[kMonitor].requestCheck();
  }

  /**
   * Execute a command
   *
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {object} cmd The command hash
   * @param {object} [options] Optional settings
   * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.checkKeys=false] Specify if the bson parser should validate keys.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {Boolean} [options.fullResult=false] Return the full envelope instead of just the result document.
   * @param {ClientSession} [options.session] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  command(ns, cmd, options, callback) {
    if (typeof options === 'function') {
      // NOTE(review): the trailing `(options = options || {})` is redundant —
      // options was just assigned `{}`
      (callback = options), (options = {}), (options = options || {});
    }

    if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
      callback(new MongoError('server is closed'));
      return;
    }

    const error = basicReadValidations(this, options);
    if (error) {
      return callback(error);
    }

    // Clone the options
    options = Object.assign({}, options, { wireProtocolCommand: false });

    // Debug log
    if (this.s.logger.isDebug()) {
      this.s.logger.debug(
        `executing command [${JSON.stringify({
          ns,
          cmd,
          options: debugOptions(DEBUG_FIELDS, options)
        })}] against ${this.name}`
      );
    }

    // error if collation not supported
    if (collationNotSupported(this, cmd)) {
      callback(new MongoError(`server ${this.name} does not support collation`));
      return;
    }

    this.s.pool.withConnection((err, conn, cb) => {
      if (err) {
        // a checkout failure means the server is in an unknown state
        markServerUnknown(this, err);
        return cb(err);
      }

      conn.command(ns, cmd, options, makeOperationHandler(this, conn, cmd, options, cb));
    }, callback);
  }

  /**
   * Execute a query against the server
   *
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {object} cmd The command document for the query
   * @param {object} cursorState State data associated with the cursor calling this method
   * @param {object} options Optional settings
   * @param {function} callback
   */
  query(ns, cmd, cursorState, options, callback) {
    if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
      callback(new MongoError('server is closed'));
      return;
    }

    this.s.pool.withConnection((err, conn, cb) => {
      if (err) {
        markServerUnknown(this, err);
        return cb(err);
      }

      conn.query(ns, cmd, cursorState, options, makeOperationHandler(this, conn, cmd, options, cb));
    }, callback);
  }

  /**
   * Execute a `getMore` against the server
   *
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {object} cursorState State data associated with the cursor calling this method
   * @param {number} batchSize Number of documents to request per batch
   * @param {object} options Optional settings
   * @param {function} callback
   */
  getMore(ns, cursorState, batchSize, options, callback) {
    if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
      callback(new MongoError('server is closed'));
      return;
    }

    this.s.pool.withConnection((err, conn, cb) => {
      if (err) {
        markServerUnknown(this, err);
        return cb(err);
      }

      conn.getMore(
        ns,
        cursorState,
        batchSize,
        options,
        makeOperationHandler(this, conn, null, options, cb)
      );
    }, callback);
  }

  /**
   * Execute a `killCursors` command against the server
   *
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {object} cursorState State data associated with the cursor calling this method
   * @param {function} callback
   */
  killCursors(ns, cursorState, callback) {
    if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
      if (typeof callback === 'function') {
        callback(new MongoError('server is closed'));
      }

      return;
    }

    this.s.pool.withConnection((err, conn, cb) => {
      if (err) {
        markServerUnknown(this, err);
        return cb(err);
      }

      conn.killCursors(ns, cursorState, makeOperationHandler(this, conn, null, undefined, cb));
    }, callback);
  }

  /**
   * Insert one or more documents
   * @method
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {array} ops An array of documents to insert
   * @param {boolean} [options.ordered=true] Execute in order or out of order
   * @param {object} [options.writeConcern={}] Write concern for the operation
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {ClientSession} [options.session] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  insert(ns, ops, options, callback) {
    executeWriteOperation({ server: this, op: 'insert', ns, ops }, options, callback);
  }

  /**
   * Perform one or more update operations
   * @method
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {array} ops An array of updates
   * @param {boolean} [options.ordered=true] Execute in order or out of order
   * @param {object} [options.writeConcern={}] Write concern for the operation
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {ClientSession} [options.session] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  update(ns, ops, options, callback) {
    executeWriteOperation({ server: this, op: 'update', ns, ops }, options, callback);
  }

  /**
   * Perform one or more remove operations
   * @method
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {array} ops An array of removes
   * @param {boolean} [options.ordered=true] Execute in order or out of order
   * @param {object} [options.writeConcern={}] Write concern for the operation
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {ClientSession} [options.session] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  remove(ns, ops, options, callback) {
    executeWriteOperation({ server: this, op: 'remove', ns, ops }, options, callback);
  }
}
|
||||
|
||||
// `clusterTime` is delegated to the owning topology (stored as `s.topology`
// in the constructor) so that every server shares one logical-clock value.
Object.defineProperty(Server.prototype, 'clusterTime', {
  get: function() {
    return this.s.topology.clusterTime;
  },
  set: function(clusterTime) {
    this.s.topology.clusterTime = clusterTime;
  }
});
|
||||
|
||||
/**
 * Determines whether a server can participate in retryable writes.
 *
 * Per the retryable-writes specification this requires: wire version >= 6
 * (MongoDB 3.6+), advertised session support (`logicalSessionTimeoutMinutes`
 * present and non-null), and a non-standalone server type.
 *
 * @param {Server} server The server to check
 * @return {boolean} true when the server supports retryable writes
 */
function supportsRetryableWrites(server) {
  // Coerce to a real boolean: previously the truthy chain could leak the raw
  // `logicalSessionTimeoutMinutes` value (or null/undefined) to callers.
  return Boolean(
    server.description.maxWireVersion >= 6 &&
      server.description.logicalSessionTimeoutMinutes &&
      server.description.type !== ServerType.Standalone
  );
}
|
||||
|
||||
/**
 * Exponentially-weighted moving average of round-trip-time samples.
 *
 * An `oldRtt` of -1 is the "no prior sample" sentinel, in which case the new
 * duration is adopted as-is; otherwise the new sample is blended in with a
 * smoothing factor of 0.2, as the SDAM monitoring specification requires.
 *
 * @param {number} oldRtt previous average RTT (-1 when unset)
 * @param {number} duration latest measured round trip time
 * @return {number} the updated average round trip time
 */
function calculateRoundTripTime(oldRtt, duration) {
  const hasPriorSample = oldRtt !== -1;
  if (!hasPriorSample) {
    return duration;
  }

  const weight = 0.2;
  const blended = weight * duration + (1 - weight) * oldRtt;
  return blended;
}
|
||||
|
||||
/**
 * Validates read options before a read operation is executed.
 *
 * @param {Server} server The server the read would run against (currently unused)
 * @param {object} options The read options to validate
 * @return {MongoError|undefined} an error when `options.readPreference` is set
 *   but is not a ReadPreference instance; otherwise undefined
 */
function basicReadValidations(server, options) {
  if (options.readPreference && !(options.readPreference instanceof ReadPreference)) {
    // NOTE: the error is returned, not thrown — callers forward it to their callback.
    return new MongoError('readPreference must be an instance of ReadPreference');
  }
}
|
||||
|
||||
/**
 * Shared execution path for insert/update/remove operations.
 *
 * Validates server state and option support, checks a connection out of the
 * server's pool, and dispatches the wire-protocol write. Errors are surfaced
 * through `callback`; a connection checkout error also marks the server Unknown.
 *
 * @param {object} args `{ server, op, ns, ops }` describing the write
 * @param {object} [options] write options (writeConcern, collation, session, ...)
 * @param {function} callback invoked with (err, result)
 */
function executeWriteOperation(args, options, callback) {
  if (typeof options === 'function') (callback = options), (options = {});
  options = options || {};

  // TODO: once we drop Node 4, use destructuring either here or in arguments.
  const server = args.server;
  const op = args.op;
  const ns = args.ns;
  // Normalize to an array so single-document writes share the batch code path.
  const ops = Array.isArray(args.ops) ? args.ops : [args.ops];

  if (server.s.state === STATE_CLOSING || server.s.state === STATE_CLOSED) {
    callback(new MongoError('server is closed'));
    return;
  }

  if (collationNotSupported(server, options)) {
    callback(new MongoError(`server ${server.name} does not support collation`));
    return;
  }
  // `hint` on update/remove requires server >= 3.4 (wire version 5) and an
  // acknowledged write; older/unacknowledged paths cannot report the failure.
  const unacknowledgedWrite = options.writeConcern && options.writeConcern.w === 0;
  if (unacknowledgedWrite || maxWireVersion(server) < 5) {
    if ((op === 'update' || op === 'remove') && ops.find(o => o.hint)) {
      callback(new MongoError(`servers < 3.4 do not support hint on ${op}`));
      return;
    }
  }

  server.s.pool.withConnection((err, conn, cb) => {
    if (err) {
      // Failing to obtain a connection implies the server may be unreachable.
      markServerUnknown(server, err);
      return cb(err);
    }

    conn[op](ns, ops, options, makeOperationHandler(server, conn, ops, options, cb));
  }, callback);
}
|
||||
|
||||
/**
 * Transitions a server's description to Unknown in response to an error.
 *
 * Non-timeout network errors also reset the monitor so the next heartbeat
 * starts from a fresh connection. Emitting 'descriptionReceived' with a null
 * ismaster is how SDAM records the server as Unknown.
 *
 * @param {Server} server The server to mark Unknown
 * @param {Error} error The error that triggered the transition
 */
function markServerUnknown(server, error) {
  if (error instanceof MongoNetworkError && !(error instanceof MongoNetworkTimeoutError)) {
    server[kMonitor].reset();
  }

  server.emit(
    'descriptionReceived',
    new ServerDescription(server.description.address, null, {
      error,
      // Prefer the error's topologyVersion (fresher information) when present.
      topologyVersion:
        error && error.topologyVersion ? error.topologyVersion : server.description.topologyVersion
    })
  );
}
|
||||
|
||||
/**
 * Reports whether a connection predates the pool's current generation,
 * i.e. the pool has been cleared since the connection was created.
 *
 * @param {Pool} pool the owning connection pool
 * @param {Connection} connection the connection to test
 * @return {boolean} true when the connection is stale
 */
function connectionIsStale(pool, connection) {
  const currentGeneration = pool.generation;
  return currentGeneration !== connection.generation;
}
|
||||
|
||||
/**
 * A "not master" / state-change error should only be acted upon when it
 * carries a newer topologyVersion than the one already recorded for the
 * server; stale errors are ignored.
 *
 * @param {Server} server the server the error came from
 * @param {MongoError} err the candidate state-change error
 * @return {boolean} true when the error is newer than the server's description
 */
function shouldHandleStateChangeError(server, err) {
  return compareTopologyVersion(server.description.topologyVersion, err.topologyVersion) < 0;
}
|
||||
|
||||
/**
 * Determines whether a command is executing inside an active (non-control)
 * transaction on the given session.
 *
 * @param {ClientSession} [session] the session, if any
 * @param {object} cmd the command being executed
 * @return {*} falsy when not inside an active transaction
 */
function inActiveTransaction(session, cmd) {
  if (!session) {
    // Preserve the original short-circuit value (null/undefined/false).
    return session;
  }

  const inTxn = session.inTransaction();
  if (!inTxn) {
    return inTxn;
  }

  // Transaction-control commands (commit/abort) do not count as "in" the txn.
  return !isTransactionCommand(cmd);
}
|
||||
|
||||
/**
 * Builds the callback that post-processes the result of a wire-protocol
 * operation, applying SDAM error-handling rules before forwarding to `callback`.
 *
 * On error (only for non-stale connections) this:
 *  - marks the session dirty on network errors
 *  - attaches the 'RetryableWriteError' label where the retryable-writes spec requires
 *  - marks the server Unknown and clears the pool for network / state-change errors
 *
 * @param {Server} server the server the operation ran against
 * @param {Connection} connection the connection that carried the operation
 * @param {object} cmd the command/ops (used to detect transaction state)
 * @param {object} options operation options (may carry `session`)
 * @param {function} callback the user's callback, invoked with (err, result)
 * @return {function} the operation result handler
 */
function makeOperationHandler(server, connection, cmd, options, callback) {
  const session = options && options.session;

  return function handleOperationResult(err, result) {
    // Stale connections belong to a cleared pool generation; their errors have
    // already been handled, so skip SDAM reactions for them.
    if (err && !connectionIsStale(server.s.pool, connection)) {
      if (err instanceof MongoNetworkError) {
        if (session && !session.hasEnded) {
          // The server-side session is in an unknown state; flag it so it is
          // discarded instead of returned to the session pool.
          session.serverSession.isDirty = true;
        }

        if (supportsRetryableWrites(server) && !inActiveTransaction(session, cmd)) {
          err.addErrorLabel('RetryableWriteError');
        }

        // Post-handshake network *timeouts* do not invalidate the server.
        if (!(err instanceof MongoNetworkTimeoutError) || isNetworkErrorBeforeHandshake(err)) {
          markServerUnknown(server, err);
          server.s.pool.clear();
        }
      } else {
        // if pre-4.4 server, then add error label if its a retryable write error
        if (
          maxWireVersion(server) < 9 &&
          isRetryableWriteError(err) &&
          !inActiveTransaction(session, cmd)
        ) {
          err.addErrorLabel('RetryableWriteError');
        }

        if (isSDAMUnrecoverableError(err)) {
          if (shouldHandleStateChangeError(server, err)) {
            // Pre-4.2 servers (and node-shutting-down errors) also require the
            // pool to be cleared.
            if (maxWireVersion(server) <= 7 || isNodeShuttingDownError(err)) {
              server.s.pool.clear();
            }

            markServerUnknown(server, err);
            process.nextTick(() => server.requestCheck());
          }
        }
      }
    }

    callback(err, result);
  };
}
|
||||
|
||||
// Only the Server class is part of this module's public surface.
module.exports = {
  Server
};
|
||||
227
node_modules/mongodb/lib/core/sdam/server_description.js
generated
vendored
Normal file
227
node_modules/mongodb/lib/core/sdam/server_description.js
generated
vendored
Normal file
@@ -0,0 +1,227 @@
|
||||
'use strict';
|
||||
|
||||
const arrayStrictEqual = require('../utils').arrayStrictEqual;
|
||||
const tagsStrictEqual = require('../utils').tagsStrictEqual;
|
||||
const errorStrictEqual = require('../utils').errorStrictEqual;
|
||||
const ServerType = require('./common').ServerType;
|
||||
const now = require('../../utils').now;
|
||||
|
||||
// Server types that can accept writes.
const WRITABLE_SERVER_TYPES = new Set([
  ServerType.RSPrimary,
  ServerType.Standalone,
  ServerType.Mongos
]);

// Server types that hold data (everything except arbiters/ghosts/unknown).
const DATA_BEARING_SERVER_TYPES = new Set([
  ServerType.RSPrimary,
  ServerType.RSSecondary,
  ServerType.Mongos,
  ServerType.Standalone
]);
|
||||
|
||||
// Fields copied verbatim from an ismaster response onto the ServerDescription
// (see the constructor's "direct mappings" loop).
const ISMASTER_FIELDS = [
  'minWireVersion',
  'maxWireVersion',
  'maxBsonObjectSize',
  'maxMessageSizeBytes',
  'maxWriteBatchSize',
  'compression',
  'me',
  'hosts',
  'passives',
  'arbiters',
  'tags',
  'setName',
  'setVersion',
  'electionId',
  'primary',
  'logicalSessionTimeoutMinutes',
  'saslSupportedMechs',
  '__nodejs_mock_server__',
  '$clusterTime'
];
|
||||
|
||||
/**
 * The client's view of a single server, based on the most recent ismaster outcome.
 *
 * Internal type, not meant to be directly instantiated
 */
class ServerDescription {
  /**
   * Create a ServerDescription
   * @param {String} address The address of the server
   * @param {Object} [ismaster] An optional ismaster response for this server
   * @param {Object} [options] Optional settings
   * @param {Number} [options.roundTripTime] The round trip time to ping this server (in ms)
   * @param {Error} [options.error] An Error used for better reporting debugging
   * @param {any} [options.topologyVersion] The topologyVersion
   */
  constructor(address, ismaster, options) {
    options = options || {};
    // Fill in spec-mandated defaults so downstream code can rely on the shape.
    ismaster = Object.assign(
      {
        minWireVersion: 0,
        maxWireVersion: 0,
        hosts: [],
        passives: [],
        arbiters: [],
        tags: []
      },
      ismaster
    );

    this.address = address;
    this.error = options.error;
    this.roundTripTime = options.roundTripTime || -1; // -1 means "no sample yet"
    this.lastUpdateTime = now();
    this.lastWriteDate = ismaster.lastWrite ? ismaster.lastWrite.lastWriteDate : null;
    this.opTime = ismaster.lastWrite ? ismaster.lastWrite.opTime : null;
    this.type = parseServerType(ismaster);
    this.topologyVersion = options.topologyVersion || ismaster.topologyVersion;

    // direct mappings
    ISMASTER_FIELDS.forEach(field => {
      if (typeof ismaster[field] !== 'undefined') this[field] = ismaster[field];
    });

    // normalize case for hosts
    if (this.me) this.me = this.me.toLowerCase();
    this.hosts = this.hosts.map(host => host.toLowerCase());
    this.passives = this.passives.map(host => host.toLowerCase());
    this.arbiters = this.arbiters.map(host => host.toLowerCase());
  }

  get allHosts() {
    return this.hosts.concat(this.arbiters).concat(this.passives);
  }

  /**
   * @return {Boolean} Is this server available for reads
   */
  get isReadable() {
    return this.type === ServerType.RSSecondary || this.isWritable;
  }

  /**
   * @return {Boolean} Is this server data bearing
   */
  get isDataBearing() {
    return DATA_BEARING_SERVER_TYPES.has(this.type);
  }

  /**
   * @return {Boolean} Is this server available for writes
   */
  get isWritable() {
    return WRITABLE_SERVER_TYPES.has(this.type);
  }

  get host() {
    const chopLength = `:${this.port}`.length;
    return this.address.slice(0, -chopLength);
  }

  get port() {
    const port = this.address.split(':').pop();
    return port ? Number.parseInt(port, 10) : port;
  }

  /**
   * Determines if another `ServerDescription` is equal to this one per the rules defined
   * in the {@link https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#serverdescription|SDAM spec}
   *
   * @param {ServerDescription} other
   * @return {Boolean}
   */
  equals(other) {
    // BUGFIX: the null check must run before `other.topologyVersion` is read;
    // previously `equals(null)` threw a TypeError instead of returning false.
    if (other == null) {
      return false;
    }

    const topologyVersionsEqual =
      this.topologyVersion === other.topologyVersion ||
      compareTopologyVersion(this.topologyVersion, other.topologyVersion) === 0;

    return (
      errorStrictEqual(this.error, other.error) &&
      this.type === other.type &&
      this.minWireVersion === other.minWireVersion &&
      this.me === other.me &&
      arrayStrictEqual(this.hosts, other.hosts) &&
      tagsStrictEqual(this.tags, other.tags) &&
      this.setName === other.setName &&
      this.setVersion === other.setVersion &&
      (this.electionId
        ? other.electionId && this.electionId.equals(other.electionId)
        : this.electionId === other.electionId) &&
      this.primary === other.primary &&
      this.logicalSessionTimeoutMinutes === other.logicalSessionTimeoutMinutes &&
      topologyVersionsEqual
    );
  }
}
|
||||
|
||||
/**
 * Parses an `ismaster` message and determines the server type
 *
 * @param {Object} ismaster The `ismaster` message to parse
 * @return {ServerType}
 */
function parseServerType(ismaster) {
  // Unsuccessful (or absent) responses always map to Unknown.
  if (!ismaster || !ismaster.ok) {
    return ServerType.Unknown;
  }

  // A node still initializing its replica set config reports `isreplicaset`.
  if (ismaster.isreplicaset) {
    return ServerType.RSGhost;
  }

  // mongos identifies itself via the magic `isdbgrid` message.
  if (ismaster.msg === 'isdbgrid') {
    return ServerType.Mongos;
  }

  // A setName indicates replica set membership; rank by role.
  if (ismaster.setName) {
    if (ismaster.hidden) {
      return ServerType.RSOther;
    }
    if (ismaster.ismaster) {
      return ServerType.RSPrimary;
    }
    if (ismaster.secondary) {
      return ServerType.RSSecondary;
    }
    if (ismaster.arbiterOnly) {
      return ServerType.RSArbiter;
    }
    return ServerType.RSOther;
  }

  return ServerType.Standalone;
}
|
||||
|
||||
/**
 * Compares two topology versions.
 *
 * @param {object} lhs
 * @param {object} rhs
 * @returns A negative number if `lhs` is older than `rhs`; positive if `lhs` is newer than `rhs`; 0 if they are equivalent.
 */
function compareTopologyVersion(lhs, rhs) {
  // A missing topology version is always considered "older".
  if (lhs == null || rhs == null) {
    return -1;
  }

  // Versions from different processes are incomparable; treat lhs as older.
  if (!lhs.processId.equals(rhs.processId)) {
    return -1;
  }

  // TODO: handle counters as Longs
  if (lhs.counter === rhs.counter) {
    return 0;
  }
  return lhs.counter < rhs.counter ? -1 : 1;
}
|
||||
|
||||
// parseServerType and compareTopologyVersion are also consumed by sibling SDAM modules.
module.exports = {
  ServerDescription,
  parseServerType,
  compareTopologyVersion
};
|
||||
238
node_modules/mongodb/lib/core/sdam/server_selection.js
generated
vendored
Normal file
238
node_modules/mongodb/lib/core/sdam/server_selection.js
generated
vendored
Normal file
@@ -0,0 +1,238 @@
|
||||
'use strict';
|
||||
const ServerType = require('./common').ServerType;
|
||||
const TopologyType = require('./common').TopologyType;
|
||||
const ReadPreference = require('../topologies/read_preference');
|
||||
const MongoError = require('../error').MongoError;
|
||||
|
||||
// max staleness constants (from the Max Staleness spec: the assumed idle-write
// period in ms, and the smallest maxStalenessSeconds a client may request)
const IDLE_WRITE_PERIOD = 10000;
const SMALLEST_MAX_STALENESS_SECONDS = 90;
|
||||
|
||||
/**
 * Returns a server selector that selects for writable servers
 */
function writableServerSelector() {
  return function(topologyDescription, servers) {
    const writable = servers.filter(s => s.isWritable);
    return latencyWindowReducer(topologyDescription, writable);
  };
}
|
||||
|
||||
/**
 * Reduces the passed in array of servers by the rules of the "Max Staleness" specification
 * found here: https://github.com/mongodb/specifications/blob/master/source/max-staleness/max-staleness.rst
 *
 * @param {ReadPreference} readPreference The read preference providing max staleness guidance
 * @param {topologyDescription} topologyDescription The topology description
 * @param {ServerDescription[]} servers The list of server descriptions to be reduced
 * @return {ServerDescription[]} The list of servers that satisfy the requirements of max staleness
 */
function maxStalenessReducer(readPreference, topologyDescription, servers) {
  // Absent or negative maxStalenessSeconds disables staleness filtering.
  if (readPreference.maxStalenessSeconds == null || readPreference.maxStalenessSeconds < 0) {
    return servers;
  }

  const maxStaleness = readPreference.maxStalenessSeconds;
  // The spec requires maxStalenessSeconds >= heartbeatFrequency + idle-write period...
  const maxStalenessVariance =
    (topologyDescription.heartbeatFrequencyMS + IDLE_WRITE_PERIOD) / 1000;
  if (maxStaleness < maxStalenessVariance) {
    throw new MongoError(`maxStalenessSeconds must be at least ${maxStalenessVariance} seconds`);
  }

  // ...and never below the spec's absolute floor of 90 seconds.
  if (maxStaleness < SMALLEST_MAX_STALENESS_SECONDS) {
    throw new MongoError(
      `maxStalenessSeconds must be at least ${SMALLEST_MAX_STALENESS_SECONDS} seconds`
    );
  }

  // With a primary, staleness is estimated relative to the primary's last write.
  if (topologyDescription.type === TopologyType.ReplicaSetWithPrimary) {
    const primary = Array.from(topologyDescription.servers.values()).filter(primaryFilter)[0];
    return servers.reduce((result, server) => {
      const stalenessMS =
        server.lastUpdateTime -
        server.lastWriteDate -
        (primary.lastUpdateTime - primary.lastWriteDate) +
        topologyDescription.heartbeatFrequencyMS;

      const staleness = stalenessMS / 1000;
      if (staleness <= readPreference.maxStalenessSeconds) result.push(server);
      return result;
    }, []);
  }

  // Without a primary, staleness is relative to the most recently written-to member.
  if (topologyDescription.type === TopologyType.ReplicaSetNoPrimary) {
    if (servers.length === 0) {
      return servers;
    }

    const sMax = servers.reduce((max, s) => (s.lastWriteDate > max.lastWriteDate ? s : max));
    return servers.reduce((result, server) => {
      const stalenessMS =
        sMax.lastWriteDate - server.lastWriteDate + topologyDescription.heartbeatFrequencyMS;

      const staleness = stalenessMS / 1000;
      if (staleness <= readPreference.maxStalenessSeconds) result.push(server);
      return result;
    }, []);
  }

  // Other topology types (Single, Sharded, Unknown) are not filtered.
  return servers;
}
|
||||
|
||||
/**
 * Determines whether a server's tags match a given set of tags
 *
 * @param {String[]} tagSet The requested tag set to match
 * @param {String[]} serverTags The server's tags
 */
function tagSetMatch(tagSet, serverTags) {
  // Every requested tag must exist on the server with an identical value.
  return Object.keys(tagSet).every(
    key => Object.keys(serverTags).indexOf(key) !== -1 && serverTags[key] === tagSet[key]
  );
}
|
||||
|
||||
/**
 * Reduces a set of server descriptions based on tags requested by the read preference
 *
 * @param {ReadPreference} readPreference The read preference providing the requested tags
 * @param {ServerDescription[]} servers The list of server descriptions to reduce
 * @return {ServerDescription[]} The list of servers matching the requested tags
 */
function tagSetReducer(readPreference, servers) {
  const tags = readPreference.tags;
  // No tags requested means every server is acceptable.
  if (tags == null || (Array.isArray(tags) && tags.length === 0)) {
    return servers;
  }

  // Tag sets are tried in order; the first one with any match wins.
  for (let i = 0; i < tags.length; ++i) {
    const matched = servers.filter(server => tagSetMatch(tags[i], server.tags));
    if (matched.length) {
      return matched;
    }
  }

  return [];
}
|
||||
|
||||
/**
 * Reduces a list of servers to ensure they fall within an acceptable latency window. This is
 * further specified in the "Server Selection" specification, found here:
 * https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst
 *
 * @param {topologyDescription} topologyDescription The topology description
 * @param {ServerDescription[]} servers The list of servers to reduce
 * @returns {ServerDescription[]} The servers which fall within an acceptable latency window
 */
function latencyWindowReducer(topologyDescription, servers) {
  // Find the fastest known round trip time (-1 acts as the "unset" sentinel).
  let low = -1;
  for (const server of servers) {
    low = low === -1 ? server.roundTripTime : Math.min(server.roundTripTime, low);
  }

  // The acceptable window extends localThresholdMS beyond the fastest server.
  const high = low + topologyDescription.localThresholdMS;

  return servers.filter(server => server.roundTripTime >= low && server.roundTripTime <= high);
}
|
||||
|
||||
// filters

// Selects replica set primaries only.
function primaryFilter(server) {
  return server.type === ServerType.RSPrimary;
}
|
||||
|
||||
// Selects replica set secondaries only.
function secondaryFilter(server) {
  return server.type === ServerType.RSSecondary;
}
|
||||
|
||||
// Selects any data-bearing replica set member (primary or secondary).
function nearestFilter(server) {
  return server.type === ServerType.RSSecondary || server.type === ServerType.RSPrimary;
}
|
||||
|
||||
// Selects servers whose type has been determined (i.e. not Unknown).
function knownFilter(server) {
  return server.type !== ServerType.Unknown;
}
|
||||
|
||||
/**
 * Returns a function which selects servers based on a provided read preference
 *
 * @param {ReadPreference} readPreference The read preference to select with
 */
function readPreferenceServerSelector(readPreference) {
  if (!readPreference.isValid()) {
    throw new TypeError('Invalid read preference specified');
  }

  return function(topologyDescription, servers) {
    // Reject read preferences requiring a newer wire version than the
    // topology's servers commonly support.
    const commonWireVersion = topologyDescription.commonWireVersion;
    if (
      commonWireVersion &&
      readPreference.minWireVersion &&
      readPreference.minWireVersion > commonWireVersion
    ) {
      throw new MongoError(
        `Minimum wire version '${readPreference.minWireVersion}' required, but found '${commonWireVersion}'`
      );
    }

    // An unknown topology can never satisfy a selection.
    if (topologyDescription.type === TopologyType.Unknown) {
      return [];
    }

    // Single/sharded topologies ignore the read preference mode; any known
    // server inside the latency window is acceptable.
    if (
      topologyDescription.type === TopologyType.Single ||
      topologyDescription.type === TopologyType.Sharded
    ) {
      return latencyWindowReducer(topologyDescription, servers.filter(knownFilter));
    }

    const mode = readPreference.mode;
    if (mode === ReadPreference.PRIMARY) {
      return servers.filter(primaryFilter);
    }

    // primaryPreferred: use the primary when present, otherwise fall through
    // to the secondary-selection pipeline below.
    if (mode === ReadPreference.PRIMARY_PREFERRED) {
      const result = servers.filter(primaryFilter);
      if (result.length) {
        return result;
      }
    }

    // secondary / secondaryPreferred / nearest: filter by eligibility, then
    // max staleness, then tag sets, and finally the latency window.
    const filter = mode === ReadPreference.NEAREST ? nearestFilter : secondaryFilter;
    const selectedServers = latencyWindowReducer(
      topologyDescription,
      tagSetReducer(
        readPreference,
        maxStalenessReducer(readPreference, topologyDescription, servers.filter(filter))
      )
    );

    // secondaryPreferred falls back to the primary when no secondary qualifies.
    if (mode === ReadPreference.SECONDARY_PREFERRED && selectedServers.length === 0) {
      return servers.filter(primaryFilter);
    }

    return selectedServers;
  };
}
|
||||
|
||||
// Public server-selection API; the individual reducers/filters remain private.
module.exports = {
  writableServerSelector,
  readPreferenceServerSelector
};
|
||||
135
node_modules/mongodb/lib/core/sdam/srv_polling.js
generated
vendored
Normal file
135
node_modules/mongodb/lib/core/sdam/srv_polling.js
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
'use strict';
|
||||
|
||||
const Logger = require('../connection/logger');
|
||||
const EventEmitter = require('events').EventEmitter;
|
||||
const dns = require('dns');
|
||||
/**
 * Determines whether a provided address matches the provided parent domain in order
 * to avoid certain attack vectors.
 *
 * @param {String} srvAddress The address to check against a domain
 * @param {String} parentDomain The domain to check the provided address against
 * @return {Boolean} Whether the provided address matches the parent domain
 */
function matchesParentDomain(srvAddress, parentDomain) {
  // Drop the leftmost label from each name, then require the SRV target to end
  // with the parent's remaining domain. The leading '.' prevents suffix
  // spoofing (e.g. 'evilexample.com' must not match 'example.com').
  const stripLeftmostLabel = /^.*?\./;
  const normalizedSrv = `.${srvAddress.replace(stripLeftmostLabel, '')}`;
  const normalizedParent = `.${parentDomain.replace(stripLeftmostLabel, '')}`;
  return normalizedSrv.endsWith(normalizedParent);
}
|
||||
|
||||
/**
 * Event payload emitted on each successful SRV poll, carrying the freshly
 * resolved SRV records.
 */
class SrvPollingEvent {
  constructor(srvRecords) {
    this.srvRecords = srvRecords;
  }

  /**
   * @return {Set<string>} unique "host:port" addresses from the SRV records
   */
  addresses() {
    const hostPorts = this.srvRecords.map(record => `${record.name}:${record.port}`);
    return new Set(hostPorts);
  }
}
|
||||
|
||||
/**
 * Periodically re-resolves the SRV record for a mongodb+srv deployment and
 * emits 'srvRecordDiscovery' whenever a fresh, validated record set is obtained.
 */
class SrvPoller extends EventEmitter {
  /**
   * @param {object} options
   * @param {string} options.srvHost
   * @param {number} [options.heartbeatFrequencyMS]
   * @param {function} [options.logger]
   * @param {string} [options.loggerLevel]
   */
  constructor(options) {
    super();

    if (!options || !options.srvHost) {
      throw new TypeError('options for SrvPoller must exist and include srvHost');
    }

    this.srvHost = options.srvHost;
    this.rescanSrvIntervalMS = 60000;
    this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 10000;
    this.logger = Logger('srvPoller', options);

    // haMode: after a failure we poll faster (heartbeat interval) until a
    // successful resolution restores the slower rescan interval.
    this.haMode = false;
    // generation guards against callbacks from polls scheduled before stop().
    this.generation = 0;

    this._timeout = null;
  }

  // Full SRV name to resolve, per the initial-DNS-seedlist connection spec.
  get srvAddress() {
    return `_mongodb._tcp.${this.srvHost}`;
  }

  // Current polling interval; shorter while in failure (HA) mode.
  get intervalMS() {
    return this.haMode ? this.heartbeatFrequencyMS : this.rescanSrvIntervalMS;
  }

  start() {
    if (!this._timeout) {
      this.schedule();
    }
  }

  stop() {
    if (this._timeout) {
      clearTimeout(this._timeout);
      // Bump the generation so any in-flight dns callback is ignored.
      this.generation += 1;
      this._timeout = null;
    }
  }

  schedule() {
    clearTimeout(this._timeout);
    this._timeout = setTimeout(() => this._poll(), this.intervalMS);
  }

  // Called on a successful poll: leave HA mode, reschedule, notify listeners.
  success(srvRecords) {
    this.haMode = false;
    this.schedule();
    this.emit('srvRecordDiscovery', new SrvPollingEvent(srvRecords));
  }

  // Called on any failure: log, enter HA (fast-poll) mode, and reschedule.
  failure(message, obj) {
    this.logger.warn(message, obj);
    this.haMode = true;
    this.schedule();
  }

  parentDomainMismatch(srvRecord) {
    this.logger.warn(
      `parent domain mismatch on SRV record (${srvRecord.name}:${srvRecord.port})`,
      srvRecord
    );
  }

  _poll() {
    const generation = this.generation;
    dns.resolveSrv(this.srvAddress, (err, srvRecords) => {
      // The poller was stopped while the lookup was in flight; discard result.
      if (generation !== this.generation) {
        return;
      }

      if (err) {
        this.failure('DNS error', err);
        return;
      }

      // Only accept records that remain within the seed host's parent domain.
      const finalAddresses = [];
      srvRecords.forEach(record => {
        if (matchesParentDomain(record.name, this.srvHost)) {
          finalAddresses.push(record);
        } else {
          this.parentDomainMismatch(record);
        }
      });

      if (!finalAddresses.length) {
        this.failure('No valid addresses found at host');
        return;
      }

      this.success(finalAddresses);
    });
  }
}
|
||||
|
||||
// Public surface: the poller and its event payload type.
module.exports.SrvPollingEvent = SrvPollingEvent;
module.exports.SrvPoller = SrvPoller;
|
||||
1142
node_modules/mongodb/lib/core/sdam/topology.js
generated
vendored
Normal file
1142
node_modules/mongodb/lib/core/sdam/topology.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
441
node_modules/mongodb/lib/core/sdam/topology_description.js
generated
vendored
Normal file
441
node_modules/mongodb/lib/core/sdam/topology_description.js
generated
vendored
Normal file
@@ -0,0 +1,441 @@
|
||||
'use strict';
|
||||
const ServerType = require('./common').ServerType;
|
||||
const ServerDescription = require('./server_description').ServerDescription;
|
||||
const WIRE_CONSTANTS = require('../wireprotocol/constants');
|
||||
const TopologyType = require('./common').TopologyType;
|
||||
|
||||
// constants related to compatibility checks
const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION;
const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION;
const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION;
const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION;
|
||||
|
||||
// Representation of a deployment of servers
|
||||
class TopologyDescription {
|
||||
/**
|
||||
* Create a TopologyDescription
|
||||
*
|
||||
* @param {string} topologyType
|
||||
* @param {Map<string, ServerDescription>} serverDescriptions the a map of address to ServerDescription
|
||||
* @param {string} setName
|
||||
* @param {number} maxSetVersion
|
||||
* @param {ObjectId} maxElectionId
|
||||
*/
|
||||
constructor(
|
||||
topologyType,
|
||||
serverDescriptions,
|
||||
setName,
|
||||
maxSetVersion,
|
||||
maxElectionId,
|
||||
commonWireVersion,
|
||||
options
|
||||
) {
|
||||
options = options || {};
|
||||
|
||||
// TODO: consider assigning all these values to a temporary value `s` which
|
||||
// we use `Object.freeze` on, ensuring the internal state of this type
|
||||
// is immutable.
|
||||
this.type = topologyType || TopologyType.Unknown;
|
||||
this.setName = setName || null;
|
||||
this.maxSetVersion = maxSetVersion || null;
|
||||
this.maxElectionId = maxElectionId || null;
|
||||
this.servers = serverDescriptions || new Map();
|
||||
this.stale = false;
|
||||
this.compatible = true;
|
||||
this.compatibilityError = null;
|
||||
this.logicalSessionTimeoutMinutes = null;
|
||||
this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 0;
|
||||
this.localThresholdMS = options.localThresholdMS || 0;
|
||||
this.commonWireVersion = commonWireVersion || null;
|
||||
|
||||
// save this locally, but don't display when printing the instance out
|
||||
Object.defineProperty(this, 'options', { value: options, enumberable: false });
|
||||
|
||||
// determine server compatibility
|
||||
for (const serverDescription of this.servers.values()) {
|
||||
if (serverDescription.type === ServerType.Unknown) continue;
|
||||
|
||||
if (serverDescription.minWireVersion > MAX_SUPPORTED_WIRE_VERSION) {
|
||||
this.compatible = false;
|
||||
this.compatibilityError = `Server at ${serverDescription.address} requires wire version ${serverDescription.minWireVersion}, but this version of the driver only supports up to ${MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${MAX_SUPPORTED_SERVER_VERSION})`;
|
||||
}
|
||||
|
||||
if (serverDescription.maxWireVersion < MIN_SUPPORTED_WIRE_VERSION) {
|
||||
this.compatible = false;
|
||||
this.compatibilityError = `Server at ${serverDescription.address} reports wire version ${serverDescription.maxWireVersion}, but this version of the driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION}).`;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Whenever a client updates the TopologyDescription from an ismaster response, it MUST set
|
||||
// TopologyDescription.logicalSessionTimeoutMinutes to the smallest logicalSessionTimeoutMinutes
|
||||
// value among ServerDescriptions of all data-bearing server types. If any have a null
|
||||
// logicalSessionTimeoutMinutes, then TopologyDescription.logicalSessionTimeoutMinutes MUST be
|
||||
// set to null.
|
||||
const readableServers = Array.from(this.servers.values()).filter(s => s.isReadable);
|
||||
this.logicalSessionTimeoutMinutes = readableServers.reduce((result, server) => {
|
||||
if (server.logicalSessionTimeoutMinutes == null) return null;
|
||||
if (result == null) return server.logicalSessionTimeoutMinutes;
|
||||
return Math.min(result, server.logicalSessionTimeoutMinutes);
|
||||
}, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new TopologyDescription based on the SrvPollingEvent
|
||||
* @param {SrvPollingEvent} ev The event
|
||||
*/
|
||||
updateFromSrvPollingEvent(ev) {
|
||||
const newAddresses = ev.addresses();
|
||||
const serverDescriptions = new Map(this.servers);
|
||||
for (const server of this.servers) {
|
||||
if (newAddresses.has(server[0])) {
|
||||
newAddresses.delete(server[0]);
|
||||
} else {
|
||||
serverDescriptions.delete(server[0]);
|
||||
}
|
||||
}
|
||||
|
||||
if (serverDescriptions.size === this.servers.size && newAddresses.size === 0) {
|
||||
return this;
|
||||
}
|
||||
|
||||
for (const address of newAddresses) {
|
||||
serverDescriptions.set(address, new ServerDescription(address));
|
||||
}
|
||||
|
||||
return new TopologyDescription(
|
||||
this.type,
|
||||
serverDescriptions,
|
||||
this.setName,
|
||||
this.maxSetVersion,
|
||||
this.maxElectionId,
|
||||
this.commonWireVersion,
|
||||
this.options,
|
||||
null
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a copy of this description updated with a given ServerDescription
|
||||
*
|
||||
* @param {ServerDescription} serverDescription
|
||||
*/
|
||||
  update(serverDescription) {
    const address = serverDescription.address;
    // NOTE: there are a number of prime targets for refactoring here
    // once we support destructuring assignments

    // potentially mutated values
    let topologyType = this.type;
    let setName = this.setName;
    let maxSetVersion = this.maxSetVersion;
    let maxElectionId = this.maxElectionId;
    let commonWireVersion = this.commonWireVersion;

    // A server reporting a setName different from the topology's is replaced
    // with an Unknown description (ServerDescription with a null ismaster)
    if (serverDescription.setName && setName && serverDescription.setName !== setName) {
      serverDescription = new ServerDescription(address, null);
    }

    const serverType = serverDescription.type;
    // copy-on-write: never mutate this.servers, this type is immutable
    let serverDescriptions = new Map(this.servers);

    // update common wire version
    if (serverDescription.maxWireVersion !== 0) {
      if (commonWireVersion == null) {
        commonWireVersion = serverDescription.maxWireVersion;
      } else {
        commonWireVersion = Math.min(commonWireVersion, serverDescription.maxWireVersion);
      }
    }

    // update the actual server description
    serverDescriptions.set(address, serverDescription);

    if (topologyType === TopologyType.Single) {
      // once we are defined as single, that never changes
      return new TopologyDescription(
        TopologyType.Single,
        serverDescriptions,
        setName,
        maxSetVersion,
        maxElectionId,
        commonWireVersion,
        this.options
      );
    }

    if (topologyType === TopologyType.Unknown) {
      if (serverType === ServerType.Standalone && this.servers.size !== 1) {
        // a standalone amongst multiple seeds is dropped rather than adopted
        serverDescriptions.delete(address);
      } else {
        topologyType = topologyTypeForServerType(serverType);
      }
    }

    if (topologyType === TopologyType.Sharded) {
      // only mongos (or still-Unknown) servers belong in a sharded topology
      if ([ServerType.Mongos, ServerType.Unknown].indexOf(serverType) === -1) {
        serverDescriptions.delete(address);
      }
    }

    if (topologyType === TopologyType.ReplicaSetNoPrimary) {
      if ([ServerType.Standalone, ServerType.Mongos].indexOf(serverType) >= 0) {
        serverDescriptions.delete(address);
      }

      if (serverType === ServerType.RSPrimary) {
        // a primary appeared; reconcile membership and possibly promote the topology
        const result = updateRsFromPrimary(
          serverDescriptions,
          setName,
          serverDescription,
          maxSetVersion,
          maxElectionId
        );

        (topologyType = result[0]),
          (setName = result[1]),
          (maxSetVersion = result[2]),
          (maxElectionId = result[3]);
      } else if (
        [ServerType.RSSecondary, ServerType.RSArbiter, ServerType.RSOther].indexOf(serverType) >= 0
      ) {
        const result = updateRsNoPrimaryFromMember(serverDescriptions, setName, serverDescription);
        (topologyType = result[0]), (setName = result[1]);
      }
    }

    if (topologyType === TopologyType.ReplicaSetWithPrimary) {
      if ([ServerType.Standalone, ServerType.Mongos].indexOf(serverType) >= 0) {
        serverDescriptions.delete(address);
        // removing a member may have removed the primary; re-derive the type
        topologyType = checkHasPrimary(serverDescriptions);
      } else if (serverType === ServerType.RSPrimary) {
        const result = updateRsFromPrimary(
          serverDescriptions,
          setName,
          serverDescription,
          maxSetVersion,
          maxElectionId
        );

        (topologyType = result[0]),
          (setName = result[1]),
          (maxSetVersion = result[2]),
          (maxElectionId = result[3]);
      } else if (
        [ServerType.RSSecondary, ServerType.RSArbiter, ServerType.RSOther].indexOf(serverType) >= 0
      ) {
        topologyType = updateRsWithPrimaryFromMember(
          serverDescriptions,
          setName,
          serverDescription
        );
      } else {
        topologyType = checkHasPrimary(serverDescriptions);
      }
    }

    return new TopologyDescription(
      topologyType,
      serverDescriptions,
      setName,
      maxSetVersion,
      maxElectionId,
      commonWireVersion,
      this.options
    );
  }
|
||||
|
||||
get error() {
|
||||
const descriptionsWithError = Array.from(this.servers.values()).filter(sd => sd.error);
|
||||
if (descriptionsWithError.length > 0) {
|
||||
return descriptionsWithError[0].error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the topology description has any known servers
|
||||
*/
|
||||
get hasKnownServers() {
|
||||
return Array.from(this.servers.values()).some(sd => sd.type !== ServerType.Unknown);
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if this topology description has a data-bearing server available.
|
||||
*/
|
||||
get hasDataBearingServers() {
|
||||
return Array.from(this.servers.values()).some(sd => sd.isDataBearing);
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the topology has a definition for the provided address
|
||||
*
|
||||
* @param {String} address
|
||||
* @return {Boolean} Whether the topology knows about this server
|
||||
*/
|
||||
hasServer(address) {
|
||||
return this.servers.has(address);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Maps the type of a single observed server to the topology type it implies.
 *
 * @param {string} serverType one of the `ServerType` values
 * @return {string} the corresponding `TopologyType` value
 */
function topologyTypeForServerType(serverType) {
  switch (serverType) {
    case ServerType.Standalone:
      return TopologyType.Single;
    case ServerType.Mongos:
      return TopologyType.Sharded;
    case ServerType.RSPrimary:
      return TopologyType.ReplicaSetWithPrimary;
    case ServerType.RSGhost:
    case ServerType.Unknown:
      return TopologyType.Unknown;
    default:
      // any other replica-set member type implies a set without a known primary
      return TopologyType.ReplicaSetNoPrimary;
  }
}
|
||||
|
||||
/**
 * Compares two ObjectId-like values for ordering.
 *
 * Missing values sort before present ones; two present ids are compared by
 * their raw `id` Buffers when available, otherwise by string representation.
 *
 * @param {Object?} oid1 first id (may be null/undefined)
 * @param {Object?} oid2 second id (may be null/undefined)
 * @return {number} negative if oid1 < oid2, 0 if equal, positive if oid1 > oid2
 */
function compareObjectId(oid1, oid2) {
  // Two missing ids are equal. Previously (null, null) returned -1, which made
  // the comparison asymmetric: both orderings claimed the first argument was smaller.
  if (oid1 == null && oid2 == null) {
    return 0;
  }

  if (oid1 == null) {
    return -1;
  }

  if (oid2 == null) {
    return 1;
  }

  // Fast path: compare the raw id bytes directly
  if (oid1.id instanceof Buffer && oid2.id instanceof Buffer) {
    const oid1Buffer = oid1.id;
    const oid2Buffer = oid2.id;
    return oid1Buffer.compare(oid2Buffer);
  }

  // Fallback: compare string representations
  const oid1String = oid1.toString();
  const oid2String = oid2.toString();
  return oid1String.localeCompare(oid2String);
}
|
||||
|
||||
/**
 * Reconciles the topology's server map with an ismaster response from a primary.
 *
 * Mutates `serverDescriptions` in place: demotes stale/old primaries, adds hosts
 * reported by this primary, and removes hosts the primary no longer lists.
 *
 * @param {Map} serverDescriptions the working address -> ServerDescription map (mutated)
 * @param {string?} setName the replica set name known so far
 * @param {ServerDescription} serverDescription the freshly observed primary
 * @param {number?} maxSetVersion greatest setVersion seen so far
 * @param {Object?} maxElectionId greatest electionId seen so far
 * @return {Array} tuple of [topologyType, setName, maxSetVersion, maxElectionId]
 */
function updateRsFromPrimary(
  serverDescriptions,
  setName,
  serverDescription,
  maxSetVersion,
  maxElectionId
) {
  // adopt the set name on first contact; mismatches disqualify this server
  setName = setName || serverDescription.setName;
  if (setName !== serverDescription.setName) {
    serverDescriptions.delete(serverDescription.address);
    return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
  }

  const electionId = serverDescription.electionId ? serverDescription.electionId : null;
  if (serverDescription.setVersion && electionId) {
    if (maxSetVersion && maxElectionId) {
      if (
        maxSetVersion > serverDescription.setVersion ||
        compareObjectId(maxElectionId, electionId) > 0
      ) {
        // this primary is stale, we must remove it
        serverDescriptions.set(
          serverDescription.address,
          new ServerDescription(serverDescription.address)
        );

        return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
      }
    }

    maxElectionId = serverDescription.electionId;
  }

  if (
    serverDescription.setVersion != null &&
    (maxSetVersion == null || serverDescription.setVersion > maxSetVersion)
  ) {
    maxSetVersion = serverDescription.setVersion;
  }

  // We've heard from the primary. Is it the same primary as before?
  for (const address of serverDescriptions.keys()) {
    const server = serverDescriptions.get(address);

    if (server.type === ServerType.RSPrimary && server.address !== serverDescription.address) {
      // Reset old primary's type to Unknown.
      serverDescriptions.set(address, new ServerDescription(server.address));

      // There can only be one primary
      break;
    }
  }

  // Discover new hosts from this primary's response.
  serverDescription.allHosts.forEach(address => {
    if (!serverDescriptions.has(address)) {
      serverDescriptions.set(address, new ServerDescription(address));
    }
  });

  // Remove hosts not in the response.
  const currentAddresses = Array.from(serverDescriptions.keys());
  const responseAddresses = serverDescription.allHosts;
  currentAddresses
    .filter(addr => responseAddresses.indexOf(addr) === -1)
    .forEach(address => {
      serverDescriptions.delete(address);
    });

  return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
}
|
||||
|
||||
/**
 * Updates a ReplicaSetWithPrimary topology from a non-primary member's response.
 *
 * Removes the member when its set name disagrees or when it reports a different
 * "me" address, then re-derives the topology type from the remaining servers.
 *
 * @param {Map} serverDescriptions the working address -> ServerDescription map (mutated)
 * @param {string} setName the replica set name (required)
 * @param {ServerDescription} serverDescription the observed member
 * @return {string} the resulting TopologyType
 * @throws {TypeError} when setName is missing
 */
function updateRsWithPrimaryFromMember(serverDescriptions, setName, serverDescription) {
  if (setName == null) {
    throw new TypeError('setName is required');
  }

  const setNameMismatch = setName !== serverDescription.setName;
  const reportsOtherAddress =
    serverDescription.me && serverDescription.address !== serverDescription.me;

  if (setNameMismatch || reportsOtherAddress) {
    serverDescriptions.delete(serverDescription.address);
  }

  return checkHasPrimary(serverDescriptions);
}
|
||||
|
||||
/**
 * Updates a ReplicaSetNoPrimary topology from a non-primary member's response.
 *
 * Adopts the set name if not yet known, discovers hosts the member reports, and
 * drops the member when its set name or reported "me" address disagrees.
 *
 * @param {Map} serverDescriptions the working address -> ServerDescription map (mutated)
 * @param {string?} setName the replica set name known so far
 * @param {ServerDescription} serverDescription the observed member
 * @return {Array} tuple of [topologyType, setName]
 */
function updateRsNoPrimaryFromMember(serverDescriptions, setName, serverDescription) {
  const topologyType = TopologyType.ReplicaSetNoPrimary;
  const effectiveSetName = setName || serverDescription.setName;

  if (effectiveSetName !== serverDescription.setName) {
    serverDescriptions.delete(serverDescription.address);
    return [topologyType, effectiveSetName];
  }

  // discover any hosts this member reports that we have not seen yet
  for (const address of serverDescription.allHosts) {
    if (!serverDescriptions.has(address)) {
      serverDescriptions.set(address, new ServerDescription(address));
    }
  }

  // a member that reports a different "me" address is removed
  if (serverDescription.me && serverDescription.address !== serverDescription.me) {
    serverDescriptions.delete(serverDescription.address);
  }

  return [topologyType, effectiveSetName];
}
|
||||
|
||||
/**
 * Derives the replica-set topology type from whether a primary is present.
 *
 * @param {Map} serverDescriptions address -> ServerDescription map to scan
 * @return {string} ReplicaSetWithPrimary when any server is an RSPrimary, otherwise ReplicaSetNoPrimary
 */
function checkHasPrimary(serverDescriptions) {
  for (const description of serverDescriptions.values()) {
    if (description.type === ServerType.RSPrimary) {
      return TopologyType.ReplicaSetWithPrimary;
    }
  }

  return TopologyType.ReplicaSetNoPrimary;
}
|
||||
|
||||
// Public API of this module: the immutable topology description model
module.exports = {
  TopologyDescription
};
|
||||
780
node_modules/mongodb/lib/core/sessions.js
generated
vendored
Normal file
780
node_modules/mongodb/lib/core/sessions.js
generated
vendored
Normal file
@@ -0,0 +1,780 @@
|
||||
'use strict';
|
||||
|
||||
const retrieveBSON = require('./connection/utils').retrieveBSON;
|
||||
const EventEmitter = require('events');
|
||||
const BSON = retrieveBSON();
|
||||
const Binary = BSON.Binary;
|
||||
const uuidV4 = require('./utils').uuidV4;
|
||||
const MongoError = require('./error').MongoError;
|
||||
const isRetryableError = require('././error').isRetryableError;
|
||||
const MongoNetworkError = require('./error').MongoNetworkError;
|
||||
const MongoWriteConcernError = require('./error').MongoWriteConcernError;
|
||||
const Transaction = require('./transactions').Transaction;
|
||||
const TxnState = require('./transactions').TxnState;
|
||||
const isPromiseLike = require('./utils').isPromiseLike;
|
||||
const ReadPreference = require('./topologies/read_preference');
|
||||
const maybePromise = require('../utils').maybePromise;
|
||||
const isTransactionCommand = require('./transactions').isTransactionCommand;
|
||||
const resolveClusterTime = require('./topologies/shared').resolveClusterTime;
|
||||
const isSharded = require('./wireprotocol/shared').isSharded;
|
||||
const maxWireVersion = require('./utils').maxWireVersion;
|
||||
const now = require('./../utils').now;
|
||||
const calculateDurationInMs = require('./../utils').calculateDurationInMs;
|
||||
// minimum maxWireVersion required before transactions are allowed on sharded clusters
// (wire version 8 — see the error message in ClientSession#startTransaction)
const minWireVersionForShardedTransactions = 8;
|
||||
|
||||
/**
 * Verifies that a session has not been ended (it still owns a server session).
 *
 * @param {ClientSession} session the session to check
 * @param {Function} [callback] when provided, receives the error instead of it being thrown
 * @return {boolean} true when the session is alive; false when dead and the callback was invoked
 * @throws {MongoError} when the session has ended and no callback was given
 */
function assertAlive(session, callback) {
  if (session.serverSession != null) {
    return true;
  }

  const error = new MongoError('Cannot use a session that has ended');
  if (typeof callback === 'function') {
    callback(error, null);
    return false;
  }

  throw error;
}
|
||||
|
||||
/**
|
||||
* Options to pass when creating a Client Session
|
||||
* @typedef {Object} SessionOptions
|
||||
* @property {boolean} [causalConsistency=true] Whether causal consistency should be enabled on this session
|
||||
* @property {TransactionOptions} [defaultTransactionOptions] The default TransactionOptions to use for transactions started on this session.
|
||||
*/
|
||||
|
||||
/**
|
||||
* A BSON document reflecting the lsid of a {@link ClientSession}
|
||||
* @typedef {Object} SessionId
|
||||
*/
|
||||
|
||||
// private key under which a ClientSession lazily stores its pooled ServerSession
const kServerSession = Symbol('serverSession');
|
||||
|
||||
/**
|
||||
* A class representing a client session on the server
|
||||
* WARNING: not meant to be instantiated directly.
|
||||
* @class
|
||||
* @hideconstructor
|
||||
*/
|
||||
class ClientSession extends EventEmitter {
  /**
   * Create a client session.
   * WARNING: not meant to be instantiated directly
   *
   * @param {Topology} topology The current client's topology (Internal Class)
   * @param {ServerSessionPool} sessionPool The server session pool (Internal Class)
   * @param {SessionOptions} [options] Optional settings
   * @param {Object} [clientOptions] Optional settings provided when creating a client in the porcelain driver
   */
  constructor(topology, sessionPool, options, clientOptions) {
    super();

    if (topology == null) {
      throw new Error('ClientSession requires a topology');
    }

    if (sessionPool == null || !(sessionPool instanceof ServerSessionPool)) {
      throw new Error('ClientSession requires a ServerSessionPool');
    }

    options = options || {};
    clientOptions = clientOptions || {};

    this.topology = topology;
    this.sessionPool = sessionPool;
    this.hasEnded = false;
    this.clientOptions = clientOptions;
    // the server session is acquired lazily via the `serverSession` getter
    this[kServerSession] = undefined;

    this.supports = {
      causalConsistency:
        typeof options.causalConsistency !== 'undefined' ? options.causalConsistency : true
    };

    this.clusterTime = options.initialClusterTime;

    this.operationTime = null;
    this.explicit = !!options.explicit;
    this.owner = options.owner;
    this.defaultTransactionOptions = Object.assign({}, options.defaultTransactionOptions);
    this.transaction = new Transaction();
  }

  /**
   * The server id associated with this session
   * @type {SessionId}
   */
  get id() {
    return this.serverSession.id;
  }

  // Lazily acquires a ServerSession from the pool on first access
  get serverSession() {
    if (this[kServerSession] == null) {
      this[kServerSession] = this.sessionPool.acquire();
    }

    return this[kServerSession];
  }

  /**
   * Ends this session on the server
   *
   * Aborts any in-progress transaction first, releases the server session back
   * to the pool, then emits an 'ended' event.
   *
   * @param {Object} [options] Optional settings. Currently reserved for future use
   * @param {Function} [callback] Optional callback for completion of this operation
   */
  endSession(options, callback) {
    if (typeof options === 'function') (callback = options), (options = {});
    options = options || {};

    const session = this;
    return maybePromise(this, callback, done => {
      if (session.hasEnded) {
        return done();
      }

      function completeEndSession() {
        // release the server session back to the pool
        session.sessionPool.release(session.serverSession);
        session[kServerSession] = undefined;

        // mark the session as ended, and emit a signal
        session.hasEnded = true;
        session.emit('ended', session);

        // spec indicates that we should ignore all errors for `endSessions`
        done();
      }

      if (session.serverSession && session.inTransaction()) {
        session.abortTransaction(err => {
          if (err) return done(err);
          completeEndSession();
        });

        return;
      }

      completeEndSession();
    });
  }

  /**
   * Advances the operationTime for a ClientSession.
   *
   * Only moves forward: an earlier operationTime than the current one is ignored.
   *
   * @param {Timestamp} operationTime the `BSON.Timestamp` of the operation type it is desired to advance to
   */
  advanceOperationTime(operationTime) {
    if (this.operationTime == null) {
      this.operationTime = operationTime;
      return;
    }

    if (operationTime.greaterThan(this.operationTime)) {
      this.operationTime = operationTime;
    }
  }

  /**
   * Used to determine if this session equals another
   *
   * Sessions are compared by the raw bytes of their lsid UUIDs.
   *
   * @param {ClientSession} session
   * @return {boolean} true if the sessions are equal
   */
  equals(session) {
    if (!(session instanceof ClientSession)) {
      return false;
    }

    return this.id.id.buffer.equals(session.id.id.buffer);
  }

  /**
   * Increment the transaction number on the internal ServerSession
   */
  incrementTransactionNumber() {
    this.serverSession.txnNumber++;
  }

  /**
   * @returns {boolean} whether this session is currently in a transaction or not
   */
  inTransaction() {
    return this.transaction.isActive;
  }

  /**
   * Starts a new transaction with the given options.
   *
   * @param {TransactionOptions} options Options for the transaction
   * @throws {MongoError} when a transaction is already in progress, or when the
   *   sharded cluster does not support transactions (wire version < 8)
   */
  startTransaction(options) {
    assertAlive(this);
    if (this.inTransaction()) {
      throw new MongoError('Transaction already in progress');
    }

    const topologyMaxWireVersion = maxWireVersion(this.topology);
    if (
      isSharded(this.topology) &&
      topologyMaxWireVersion != null &&
      topologyMaxWireVersion < minWireVersionForShardedTransactions
    ) {
      throw new MongoError('Transactions are not supported on sharded clusters in MongoDB < 4.2.');
    }

    // increment txnNumber
    this.incrementTransactionNumber();

    // create transaction state
    this.transaction = new Transaction(
      Object.assign({}, this.clientOptions, options || this.defaultTransactionOptions)
    );

    this.transaction.transition(TxnState.STARTING_TRANSACTION);
  }

  /**
   * Commits the currently active transaction in this session.
   *
   * @param {Function} [callback] optional callback for completion of this operation
   * @return {Promise} A promise is returned if no callback is provided
   */
  commitTransaction(callback) {
    return maybePromise(this, callback, done => endTransaction(this, 'commitTransaction', done));
  }

  /**
   * Aborts the currently active transaction in this session.
   *
   * @param {Function} [callback] optional callback for completion of this operation
   * @return {Promise} A promise is returned if no callback is provided
   */
  abortTransaction(callback) {
    return maybePromise(this, callback, done => endTransaction(this, 'abortTransaction', done));
  }

  /**
   * This is here to ensure that ClientSession is never serialized to BSON.
   * @ignore
   */
  toBSON() {
    throw new Error('ClientSession cannot be serialized to BSON.');
  }

  /**
   * A user provided function to be run within a transaction
   *
   * @callback WithTransactionCallback
   * @param {ClientSession} session The parent session of the transaction running the operation. This should be passed into each operation within the lambda.
   * @returns {Promise} The resulting Promise of operations run within this transaction
   */

  /**
   * Runs a provided lambda within a transaction, retrying either the commit operation
   * or entire transaction as needed (and when the error permits) to better ensure that
   * the transaction can complete successfully.
   *
   * IMPORTANT: This method requires the user to return a Promise, all lambdas that do not
   * return a Promise will result in undefined behavior.
   *
   * @param {WithTransactionCallback} fn
   * @param {TransactionOptions} [options] Optional settings for the transaction
   */
  withTransaction(fn, options) {
    // startTime bounds total retry time — see MAX_WITH_TRANSACTION_TIMEOUT
    const startTime = now();
    return attemptTransaction(this, startTime, fn, options);
  }
}
|
||||
|
||||
// overall time budget (ms) for withTransaction retries
const MAX_WITH_TRANSACTION_TIMEOUT = 120000;
// server error codes consulted when classifying commit failures
const UNSATISFIABLE_WRITE_CONCERN_CODE = 100;
const UNKNOWN_REPL_WRITE_CONCERN_CODE = 79;
const MAX_TIME_MS_EXPIRED_CODE = 50;
// codeName values for which a commit outcome is considered deterministic
// (i.e. NOT labeled "UnknownTransactionCommitResult")
const NON_DETERMINISTIC_WRITE_CONCERN_ERRORS = new Set([
  'CannotSatisfyWriteConcern',
  'UnknownReplWriteConcern',
  'UnsatisfiableWriteConcern'
]);
|
||||
|
||||
/**
 * Determines whether the elapsed time since `startTime` is still within `max`.
 *
 * @param {number} startTime timestamp to measure from
 * @param {number} max maximum allowed duration in milliseconds
 * @return {boolean} true while the budget has not been exhausted
 */
function hasNotTimedOut(startTime, max) {
  const elapsedMs = calculateDurationInMs(startTime);
  return elapsedMs < max;
}
|
||||
|
||||
/**
 * Determines whether a commit error leaves the transaction outcome unknown
 * (and therefore eligible for the "UnknownTransactionCommitResult" label).
 *
 * @param {MongoError} err the error returned from commitTransaction
 * @return {boolean}
 */
function isUnknownTransactionCommitResult(err) {
  if (isMaxTimeMSExpiredError(err)) {
    return true;
  }

  // errors with these names/codes have a deterministic (known-failed) outcome
  const isDeterministicFailure =
    NON_DETERMINISTIC_WRITE_CONCERN_ERRORS.has(err.codeName) ||
    err.code === UNSATISFIABLE_WRITE_CONCERN_CODE ||
    err.code === UNKNOWN_REPL_WRITE_CONCERN_CODE;

  return !isDeterministicFailure;
}
|
||||
|
||||
/**
 * Determines whether an error represents a MaxTimeMSExpired condition, either
 * as the top-level code or nested inside a writeConcernError.
 *
 * @param {MongoError?} err the error to inspect (may be null/undefined)
 * @return {boolean|*} truthy when the error is a maxTimeMS expiration
 */
function isMaxTimeMSExpiredError(err) {
  if (err == null) {
    return false;
  }

  const expiredAtTopLevel = err.code === MAX_TIME_MS_EXPIRED_CODE;
  const expiredInWriteConcern =
    err.writeConcernError && err.writeConcernError.code === MAX_TIME_MS_EXPIRED_CODE;

  return expiredAtTopLevel || expiredInWriteConcern;
}
|
||||
|
||||
/**
 * Commits the active transaction, retrying the commit (or the whole
 * transaction) based on error labels while the overall time budget lasts.
 *
 * @param {ClientSession} session the session whose transaction is committed
 * @param {number} startTime timestamp when withTransaction began
 * @param {WithTransactionCallback} fn the user lambda, needed for full-transaction retries
 * @param {TransactionOptions} [options] options forwarded on retry
 * @returns {Promise}
 */
function attemptTransactionCommit(session, startTime, fn, options) {
  return session.commitTransaction().catch(err => {
    const retryable =
      err instanceof MongoError &&
      hasNotTimedOut(startTime, MAX_WITH_TRANSACTION_TIMEOUT) &&
      !isMaxTimeMSExpiredError(err);

    if (retryable && err.hasErrorLabel('UnknownTransactionCommitResult')) {
      // outcome unknown: retry only the commit
      return attemptTransactionCommit(session, startTime, fn, options);
    }

    if (retryable && err.hasErrorLabel('TransientTransactionError')) {
      // transient failure: rerun the entire transaction
      return attemptTransaction(session, startTime, fn, options);
    }

    throw err;
  });
}
|
||||
|
||||
// transaction states reachable only when the transaction was already finished
// (never started, committed, or aborted) by the time the lambda resolved
const USER_EXPLICIT_TXN_END_STATES = new Set([
  TxnState.NO_TRANSACTION,
  TxnState.TRANSACTION_COMMITTED,
  TxnState.TRANSACTION_ABORTED
]);

// true when the user's withTransaction lambda already committed/aborted the
// transaction itself, so the driver must not attempt its own commit
function userExplicitlyEndedTransaction(session) {
  return USER_EXPLICIT_TXN_END_STATES.has(session.transaction.state);
}
|
||||
|
||||
/**
 * Runs one attempt of a `withTransaction` lambda: starts a transaction, invokes
 * the user function, commits on success, and aborts/retries on failure within
 * the overall time budget.
 *
 * @param {ClientSession} session the session running the transaction
 * @param {number} startTime timestamp when withTransaction began; bounds total retry time
 * @param {WithTransactionCallback} fn user-provided lambda; must return a Promise
 * @param {TransactionOptions} [options] options forwarded to startTransaction
 * @returns {Promise}
 */
function attemptTransaction(session, startTime, fn, options) {
  session.startTransaction(options);

  let promise;
  try {
    promise = fn(session);
  } catch (err) {
    // route synchronous throws through the same rejection path
    promise = Promise.reject(err);
  }

  if (!isPromiseLike(promise)) {
    // NOTE(review): this abort is fire-and-forget; the TypeError below is
    // thrown without waiting for the abort to complete
    session.abortTransaction();
    throw new TypeError('Function provided to `withTransaction` must return a Promise');
  }

  return promise
    .then(() => {
      if (userExplicitlyEndedTransaction(session)) {
        // the lambda already committed/aborted; nothing left to do
        return;
      }

      return attemptTransactionCommit(session, startTime, fn, options);
    })
    .catch(err => {
      function maybeRetryOrThrow(err) {
        // transient errors retry the whole transaction while within budget
        if (
          err instanceof MongoError &&
          err.hasErrorLabel('TransientTransactionError') &&
          hasNotTimedOut(startTime, MAX_WITH_TRANSACTION_TIMEOUT)
        ) {
          return attemptTransaction(session, startTime, fn, options);
        }

        if (isMaxTimeMSExpiredError(err)) {
          err.addErrorLabel('UnknownTransactionCommitResult');
        }

        throw err;
      }

      if (session.transaction.isActive) {
        // abort the open transaction before deciding to retry or rethrow
        return session.abortTransaction().then(() => maybeRetryOrThrow(err));
      }

      return maybeRetryOrThrow(err);
    });
}
|
||||
|
||||
/**
 * Shared implementation for commitTransaction/abortTransaction: validates the
 * transaction state machine, builds the admin command (writeConcern, maxTimeMS,
 * recoveryToken), sends it, and retries once with majority write concern on
 * retryable errors.
 *
 * @param {ClientSession} session the session owning the transaction
 * @param {string} commandName either 'commitTransaction' or 'abortTransaction'
 * @param {Function} callback invoked with (error, reply)
 */
function endTransaction(session, commandName, callback) {
  if (!assertAlive(session, callback)) {
    // checking result in case callback was called
    return;
  }

  // handle any initial problematic cases
  let txnState = session.transaction.state;

  if (txnState === TxnState.NO_TRANSACTION) {
    callback(new MongoError('No transaction started'));
    return;
  }

  if (commandName === 'commitTransaction') {
    if (
      txnState === TxnState.STARTING_TRANSACTION ||
      txnState === TxnState.TRANSACTION_COMMITTED_EMPTY
    ) {
      // the transaction was never started, we can safely exit here
      session.transaction.transition(TxnState.TRANSACTION_COMMITTED_EMPTY);
      callback(null, null);
      return;
    }

    if (txnState === TxnState.TRANSACTION_ABORTED) {
      callback(new MongoError('Cannot call commitTransaction after calling abortTransaction'));
      return;
    }
  } else {
    if (txnState === TxnState.STARTING_TRANSACTION) {
      // the transaction was never started, we can safely exit here
      session.transaction.transition(TxnState.TRANSACTION_ABORTED);
      callback(null, null);
      return;
    }

    if (txnState === TxnState.TRANSACTION_ABORTED) {
      callback(new MongoError('Cannot call abortTransaction twice'));
      return;
    }

    if (
      txnState === TxnState.TRANSACTION_COMMITTED ||
      txnState === TxnState.TRANSACTION_COMMITTED_EMPTY
    ) {
      callback(new MongoError('Cannot call abortTransaction after calling commitTransaction'));
      return;
    }
  }

  // construct and send the command
  const command = { [commandName]: 1 };

  // apply a writeConcern if specified
  let writeConcern;
  if (session.transaction.options.writeConcern) {
    writeConcern = Object.assign({}, session.transaction.options.writeConcern);
  } else if (session.clientOptions && session.clientOptions.w) {
    writeConcern = { w: session.clientOptions.w };
  }

  // a commit retry (state already TRANSACTION_COMMITTED) is upgraded to majority
  if (txnState === TxnState.TRANSACTION_COMMITTED) {
    writeConcern = Object.assign({ wtimeout: 10000 }, writeConcern, { w: 'majority' });
  }

  if (writeConcern) {
    Object.assign(command, { writeConcern });
  }

  if (commandName === 'commitTransaction' && session.transaction.options.maxTimeMS) {
    Object.assign(command, { maxTimeMS: session.transaction.options.maxTimeMS });
  }

  // transitions the local transaction state and classifies commit errors
  function commandHandler(e, r) {
    if (commandName === 'commitTransaction') {
      session.transaction.transition(TxnState.TRANSACTION_COMMITTED);

      if (
        e &&
        (e instanceof MongoNetworkError ||
          e instanceof MongoWriteConcernError ||
          isRetryableError(e) ||
          isMaxTimeMSExpiredError(e))
      ) {
        if (isUnknownTransactionCommitResult(e)) {
          e.addErrorLabel('UnknownTransactionCommitResult');

          // per txns spec, must unpin session in this case
          session.transaction.unpinServer();
        }
      }
    } else {
      session.transaction.transition(TxnState.TRANSACTION_ABORTED);
    }

    callback(e, r);
  }

  // The spec indicates that we should ignore all errors on `abortTransaction`
  function transactionError(err) {
    return commandName === 'commitTransaction' ? err : null;
  }

  if (
    // Assumption here that commandName is "commitTransaction" or "abortTransaction"
    session.transaction.recoveryToken &&
    supportsRecoveryToken(session)
  ) {
    command.recoveryToken = session.transaction.recoveryToken;
  }

  // send the command
  session.topology.command('admin.$cmd', command, { session }, (err, reply) => {
    if (err && isRetryableError(err)) {
      // SPEC-1185: apply majority write concern when retrying commitTransaction
      if (command.commitTransaction) {
        // per txns spec, must unpin session in this case
        session.transaction.unpinServer();

        command.writeConcern = Object.assign({ wtimeout: 10000 }, command.writeConcern, {
          w: 'majority'
        });
      }

      // single retry of the command, then report whatever happened
      return session.topology.command('admin.$cmd', command, { session }, (_err, _reply) =>
        commandHandler(transactionError(_err), _reply)
      );
    }

    commandHandler(transactionError(err), reply);
  });
}
|
||||
|
||||
/**
 * Determines whether the session's topology was configured to attach sharded
 * transaction recovery tokens (`useRecoveryToken` option).
 *
 * @param {ClientSession} session the session to inspect
 * @return {boolean}
 */
function supportsRecoveryToken(session) {
  const topologyOptions = session.topology.s.options;
  return Boolean(topologyOptions.useRecoveryToken);
}
|
||||
|
||||
/**
 * Reflects the existence of a session on the server. Can be reused by the session pool.
 * WARNING: not meant to be instantiated directly. For internal use only.
 * @ignore
 */
class ServerSession {
  constructor() {
    // lsid: a UUID identifying this session on the server
    this.id = { id: new Binary(uuidV4(), Binary.SUBTYPE_UUID) };
    this.lastUse = now();
    this.txnNumber = 0;
    this.isDirty = false;
  }

  /**
   * Determines if the server session has timed out.
   * @ignore
   * @param {Date} sessionTimeoutMinutes The server's "logicalSessionTimeoutMinutes"
   * @return {boolean} true if the session has timed out.
   */
  hasTimedOut(sessionTimeoutMinutes) {
    // Take the difference of the lastUse timestamp and now (milliseconds) and
    // convert it to whole minutes to compare to `sessionTimeoutMinutes`.
    // Previously this wrapped the elapsed time with `% 86400000 % 3600000`,
    // which made any session idle for more than an hour appear nearly fresh
    // (e.g. 90 minutes idle looked like 30), so stale sessions were reused.
    const idleTimeMinutes = Math.round(calculateDurationInMs(this.lastUse) / 60000);

    // consider the session timed out one minute early, to be safe
    return idleTimeMinutes > sessionTimeoutMinutes - 1;
  }
}
|
||||
|
||||
/**
 * Maintains a pool of Server Sessions.
 * For internal use only
 * @ignore
 */
class ServerSessionPool {
  constructor(topology) {
    if (topology == null) {
      throw new Error('ServerSessionPool requires a topology');
    }

    this.topology = topology;
    // LIFO list of pooled ServerSession instances (most recently released first)
    this.sessions = [];
  }

  /**
   * Ends all sessions in the session pool.
   *
   * Sends the pooled session ids to the server via `endSessions`, then clears
   * the pool. The callback is invoked regardless of whether the pool was empty.
   * @ignore
   */
  endAllPooledSessions(callback) {
    if (this.sessions.length) {
      this.topology.endSessions(
        this.sessions.map(session => session.id),
        () => {
          this.sessions = [];
          if (typeof callback === 'function') {
            callback();
          }
        }
      );

      return;
    }

    if (typeof callback === 'function') {
      callback();
    }
  }

  /**
   * Acquire a Server Session from the pool.
   * Iterates through each session in the pool, removing any stale sessions
   * along the way. The first non-stale session found is removed from the
   * pool and returned. If no non-stale session is found, a new ServerSession
   * is created.
   * @ignore
   * @returns {ServerSession}
   */
  acquire() {
    const sessionTimeoutMinutes = this.topology.logicalSessionTimeoutMinutes;
    while (this.sessions.length) {
      const session = this.sessions.shift();
      if (!session.hasTimedOut(sessionTimeoutMinutes)) {
        return session;
      }
    }

    return new ServerSession();
  }

  /**
   * Release a session to the session pool
   * Adds the session back to the session pool if the session has not timed out yet.
   * This method also removes any stale sessions from the pool.
   * @ignore
   * @param {ServerSession} session The session to release to the pool
   */
  release(session) {
    const sessionTimeoutMinutes = this.topology.logicalSessionTimeoutMinutes;
    // prune timed-out sessions from the tail (oldest end) of the pool
    while (this.sessions.length) {
      const pooledSession = this.sessions[this.sessions.length - 1];
      if (pooledSession.hasTimedOut(sessionTimeoutMinutes)) {
        this.sessions.pop();
      } else {
        break;
      }
    }

    if (!session.hasTimedOut(sessionTimeoutMinutes)) {
      // dirty sessions (e.g. after network error) are discarded, not pooled
      if (session.isDirty) {
        return;
      }

      // otherwise, readd this session to the session pool
      this.sessions.unshift(session);
    }
  }
}
|
||||
|
||||
// TODO: this should be codified in command construction
// @see https://github.com/mongodb/specifications/blob/master/source/read-write-concern/read-write-concern.rst#read-concern
/**
 * Determines whether a command document supports a readConcern.
 *
 * @param {Object} command the command document to inspect
 * @param {Object} [options] options for the operation (used for mapReduce `out`)
 * @return {boolean}
 */
function commandSupportsReadConcern(command, options) {
  const readConcernCommands = [
    'aggregate',
    'count',
    'distinct',
    'find',
    'parallelCollectionScan',
    'geoNear',
    'geoSearch'
  ];

  if (readConcernCommands.some(name => command[name])) {
    return true;
  }

  // mapReduce supports readConcern only when its output is inline
  if (command.mapReduce && options && options.out) {
    const out = options.out;
    if (out.inline === 1 || out === 'inline') {
      return true;
    }
  }

  return false;
}
|
||||
|
||||
/**
 * Optionally decorate a command with sessions specific keys
 *
 * @ignore
 * @param {ClientSession} session the session tracking transaction state
 * @param {Object} command the command to decorate
 * @param {Object} [options] Optional settings passed to calling operation
 * @return {MongoError|null} An error, if some error condition was met
 */
function applySession(session, command, options) {
  if (session.hasEnded) {
    // TODO: merge this with `assertAlive`, did not want to throw a try/catch here
    return new MongoError('Cannot use a session that has ended');
  }

  // SPEC-1019: silently ignore explicit session with unacknowledged write for backwards compatibility
  if (options && options.writeConcern && options.writeConcern.w === 0) {
    return;
  }

  // `options` is documented as optional but was previously dereferenced
  // unconditionally below (`options.willRetryWrite`, `options.readPreference`);
  // normalize it once so the remaining reads are safe.
  options = options || {};

  const serverSession = session.serverSession;
  serverSession.lastUse = now();
  command.lsid = serverSession.id;

  // first apply non-transaction-specific sessions data
  const inTransaction = session.inTransaction() || isTransactionCommand(command);
  const isRetryableWrite = options.willRetryWrite;
  const shouldApplyReadConcern = commandSupportsReadConcern(command, options);

  // Retryable writes and transactions both require the transaction number.
  if (serverSession.txnNumber && (isRetryableWrite || inTransaction)) {
    command.txnNumber = BSON.Long.fromNumber(serverSession.txnNumber);
  }

  // now attempt to apply transaction-specific sessions data
  if (!inTransaction) {
    if (session.transaction.state !== TxnState.NO_TRANSACTION) {
      session.transaction.transition(TxnState.NO_TRANSACTION);
    }

    // TODO: the following should only be applied to read operation per spec.
    // for causal consistency
    if (session.supports.causalConsistency && session.operationTime && shouldApplyReadConcern) {
      command.readConcern = command.readConcern || {};
      Object.assign(command.readConcern, { afterClusterTime: session.operationTime });
    }

    return;
  }

  if (options.readPreference && !options.readPreference.equals(ReadPreference.primary)) {
    return new MongoError(
      `Read preference in a transaction must be primary, not: ${options.readPreference.mode}`
    );
  }

  // `autocommit` must always be false to differentiate from retryable writes
  command.autocommit = false;

  if (session.transaction.state === TxnState.STARTING_TRANSACTION) {
    session.transaction.transition(TxnState.TRANSACTION_IN_PROGRESS);
    command.startTransaction = true;

    // Transaction-level readConcern wins over the client-level one.
    const readConcern =
      session.transaction.options.readConcern || session.clientOptions.readConcern;
    if (readConcern) {
      command.readConcern = readConcern;
    }

    if (session.supports.causalConsistency && session.operationTime) {
      command.readConcern = command.readConcern || {};
      Object.assign(command.readConcern, { afterClusterTime: session.operationTime });
    }
  }
}
|
||||
|
||||
/**
 * Update a session's tracked state (cluster time, operation time, recovery
 * token) from a server command response.
 *
 * @ignore
 * @param {ClientSession} [session] the session to update (may be null/undefined)
 * @param {Object} document the server response document
 */
function updateSessionFromResponse(session, document) {
  // Guard on `session` here for consistency with the branches below — without
  // it, a $clusterTime-bearing response with no session would throw.
  if (document.$clusterTime && session) {
    resolveClusterTime(session, document.$clusterTime);
  }

  if (document.operationTime && session && session.supports.causalConsistency) {
    session.advanceOperationTime(document.operationTime);
  }

  if (document.recoveryToken && session && session.inTransaction()) {
    session.transaction._recoveryToken = document.recoveryToken;
  }
}
|
||||
|
||||
// Public API of the driver's sessions module.
module.exports = {
  ClientSession,
  ServerSession,
  ServerSessionPool,
  TxnState,
  applySession,
  updateSessionFromResponse,
  commandSupportsReadConcern
};
|
||||
61
node_modules/mongodb/lib/core/tools/smoke_plugin.js
generated
vendored
Normal file
61
node_modules/mongodb/lib/core/tools/smoke_plugin.js
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
'use strict';
|
||||
|
||||
var fs = require('fs');
|
||||
|
||||
/* Note: because this plugin uses process.on('uncaughtException'), only one
|
||||
* of these can exist at any given time. This plugin and anything else that
|
||||
* uses process.on('uncaughtException') will conflict. */
|
||||
exports.attachToRunner = function(runner, outputFile) {
|
||||
var smokeOutput = { results: [] };
|
||||
var runningTests = {};
|
||||
|
||||
var integraPlugin = {
|
||||
beforeTest: function(test, callback) {
|
||||
test.startTime = Date.now();
|
||||
runningTests[test.name] = test;
|
||||
callback();
|
||||
},
|
||||
afterTest: function(test, callback) {
|
||||
smokeOutput.results.push({
|
||||
status: test.status,
|
||||
start: test.startTime,
|
||||
end: Date.now(),
|
||||
test_file: test.name,
|
||||
exit_code: 0,
|
||||
url: ''
|
||||
});
|
||||
delete runningTests[test.name];
|
||||
callback();
|
||||
},
|
||||
beforeExit: function(obj, callback) {
|
||||
fs.writeFile(outputFile, JSON.stringify(smokeOutput), function() {
|
||||
callback();
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
// In case of exception, make sure we write file
|
||||
process.on('uncaughtException', function(err) {
|
||||
// Mark all currently running tests as failed
|
||||
for (var testName in runningTests) {
|
||||
smokeOutput.results.push({
|
||||
status: 'fail',
|
||||
start: runningTests[testName].startTime,
|
||||
end: Date.now(),
|
||||
test_file: testName,
|
||||
exit_code: 0,
|
||||
url: ''
|
||||
});
|
||||
}
|
||||
|
||||
// write file
|
||||
fs.writeFileSync(outputFile, JSON.stringify(smokeOutput));
|
||||
|
||||
// Standard NodeJS uncaught exception handler
|
||||
console.error(err.stack);
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
runner.plugin(integraPlugin);
|
||||
return integraPlugin;
|
||||
};
|
||||
1396
node_modules/mongodb/lib/core/topologies/mongos.js
generated
vendored
Normal file
1396
node_modules/mongodb/lib/core/topologies/mongos.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
266
node_modules/mongodb/lib/core/topologies/read_preference.js
generated
vendored
Normal file
266
node_modules/mongodb/lib/core/topologies/read_preference.js
generated
vendored
Normal file
@@ -0,0 +1,266 @@
|
||||
'use strict';
|
||||
|
||||
/**
|
||||
* The **ReadPreference** class is a class that represents a MongoDB ReadPreference and is
|
||||
* used to construct connections.
|
||||
* @class
|
||||
* @param {string} mode A string describing the read preference mode (primary|primaryPreferred|secondary|secondaryPreferred|nearest)
|
||||
* @param {array} tags The tags object
|
||||
* @param {object} [options] Additional read preference options
|
||||
* @param {number} [options.maxStalenessSeconds] Max secondary read staleness in seconds, Minimum value is 90 seconds.
|
||||
* @param {object} [options.hedge] Server mode in which the same query is dispatched in parallel to multiple replica set members.
|
||||
* @param {boolean} [options.hedge.enabled] Explicitly enable or disable hedged reads.
|
||||
* @see https://docs.mongodb.com/manual/core/read-preference/
|
||||
* @return {ReadPreference}
|
||||
*/
|
||||
const ReadPreference = function(mode, tags, options) {
  if (!ReadPreference.isValid(mode)) {
    throw new TypeError(`Invalid read preference mode ${mode}`);
  }

  // TODO(major): tags MUST be an array of tagsets
  if (tags && !Array.isArray(tags)) {
    console.warn(
      'ReadPreference tags must be an array, this will change in the next major version'
    );

    // Heuristic: a non-array `tags` argument that carries option-like keys
    // (maxStalenessSeconds / hedge) was almost certainly meant as `options`.
    const tagsHasMaxStalenessSeconds = typeof tags.maxStalenessSeconds !== 'undefined';
    const tagsHasHedge = typeof tags.hedge !== 'undefined';
    const tagsHasOptions = tagsHasMaxStalenessSeconds || tagsHasHedge;
    if (tagsHasOptions) {
      // this is likely an options object
      options = tags;
      tags = undefined;
    } else {
      // Otherwise treat the single tagset as a one-element array of tagsets.
      tags = [tags];
    }
  }

  this.mode = mode;
  this.tags = tags;
  // Note: read before the `options = options || {}` normalization below, so
  // `hedge` is falsy when no options object was supplied.
  this.hedge = options && options.hedge;

  options = options || {};
  if (options.maxStalenessSeconds != null) {
    if (options.maxStalenessSeconds <= 0) {
      throw new TypeError('maxStalenessSeconds must be a positive integer');
    }

    this.maxStalenessSeconds = options.maxStalenessSeconds;

    // NOTE: The minimum required wire version is 5 for this read preference. If the existing
    // topology has a lower value then a MongoError will be thrown during server selection.
    this.minWireVersion = 5;
  }

  // `primary` mode is incompatible with tags, maxStalenessSeconds and hedge.
  if (this.mode === ReadPreference.PRIMARY) {
    if (this.tags && Array.isArray(this.tags) && this.tags.length > 0) {
      throw new TypeError('Primary read preference cannot be combined with tags');
    }

    if (this.maxStalenessSeconds) {
      throw new TypeError('Primary read preference cannot be combined with maxStalenessSeconds');
    }

    if (this.hedge) {
      throw new TypeError('Primary read preference cannot be combined with hedge');
    }
  }
};
|
||||
|
||||
// Support the deprecated `preference` property introduced in the porcelain layer
// (read-only alias for `mode`).
Object.defineProperty(ReadPreference.prototype, 'preference', {
  enumerable: true,
  get: function() {
    return this.mode;
  }
});
|
||||
|
||||
/*
 * Read preference mode constants
 */
ReadPreference.PRIMARY = 'primary';
ReadPreference.PRIMARY_PREFERRED = 'primaryPreferred';
ReadPreference.SECONDARY = 'secondary';
ReadPreference.SECONDARY_PREFERRED = 'secondaryPreferred';
ReadPreference.NEAREST = 'nearest';

// `null` is included so callers may pass "no preference" through validation.
const VALID_MODES = [
  ReadPreference.PRIMARY,
  ReadPreference.PRIMARY_PREFERRED,
  ReadPreference.SECONDARY,
  ReadPreference.SECONDARY_PREFERRED,
  ReadPreference.NEAREST,
  null
];
|
||||
|
||||
/**
 * Construct a ReadPreference given an options object.
 *
 * Accepts `options.readPreference` as a mode string, a ReadPreference
 * instance, or a plain object with a `mode`/`preference` field; any other
 * value is passed through unchanged.
 *
 * @param {object} options The options object from which to extract the read preference.
 * @return {ReadPreference}
 */
ReadPreference.fromOptions = function(options) {
  if (!options) {
    return null;
  }

  const readPreference = options.readPreference;
  if (!readPreference) {
    return null;
  }

  if (typeof readPreference === 'string') {
    return new ReadPreference(readPreference, options.readPreferenceTags);
  }

  const isPlainObject =
    !(readPreference instanceof ReadPreference) && typeof readPreference === 'object';
  if (isPlainObject) {
    const mode = readPreference.mode || readPreference.preference;
    if (mode && typeof mode === 'string') {
      return new ReadPreference(mode, readPreference.tags, {
        maxStalenessSeconds: readPreference.maxStalenessSeconds || options.maxStalenessSeconds,
        hedge: readPreference.hedge
      });
    }
  }

  return readPreference;
};
|
||||
|
||||
/**
 * Resolves a read preference based on well-defined inheritance rules. This method will not only
 * determine the read preference (if there is one), but will also ensure the returned value is a
 * properly constructed instance of `ReadPreference`.
 *
 * Precedence: explicit option > active transaction's preference > parent's
 * preference > primary.
 *
 * @param {Collection|Db|MongoClient} parent The parent of the operation on which to determine the read
 * preference, used for determining the inherited read preference.
 * @param {object} options The options passed into the method, potentially containing a read preference
 * @returns {(ReadPreference|null)} The resolved read preference
 */
ReadPreference.resolve = function(parent, options) {
  options = options || {};
  const session = options.session;

  let readPreference;
  if (options.readPreference) {
    // An explicitly supplied option wins.
    readPreference = ReadPreference.fromOptions(options);
  } else if (session && session.inTransaction() && session.transaction.options.readPreference) {
    // The transaction’s read preference MUST override all other user configurable read preferences.
    readPreference = session.transaction.options.readPreference;
  } else if (parent && parent.readPreference != null) {
    // Otherwise inherit from the parent object (MongoClient/Db/Collection).
    readPreference = parent.readPreference;
  } else {
    readPreference = ReadPreference.primary;
  }

  return typeof readPreference === 'string' ? new ReadPreference(readPreference) : readPreference;
};
|
||||
|
||||
/**
 * Replaces options.readPreference with a ReadPreference instance
 *
 * Mode strings and plain `{ mode }` objects are converted in place; existing
 * ReadPreference instances are left untouched; any other non-null value
 * throws.
 */
ReadPreference.translate = function(options) {
  const r = options.readPreference;
  if (r == null) {
    return options;
  }

  if (typeof r === 'string') {
    options.readPreference = new ReadPreference(r);
    return options;
  }

  if (r instanceof ReadPreference) {
    return options;
  }

  if (typeof r === 'object') {
    const mode = r.mode || r.preference;
    if (mode && typeof mode === 'string') {
      options.readPreference = new ReadPreference(mode, r.tags, {
        maxStalenessSeconds: r.maxStalenessSeconds
      });
    }
    return options;
  }

  throw new TypeError('Invalid read preference: ' + r);
};
|
||||
|
||||
/**
 * Validate if a mode is legal
 *
 * @method
 * @param {string} mode The string representing the read preference mode.
 * @return {boolean} True if a mode is valid
 */
ReadPreference.isValid = function(mode) {
  return VALID_MODES.some(validMode => validMode === mode);
};
|
||||
|
||||
/**
 * Validate this instance's mode (or, when given, an explicit mode string)
 *
 * @method
 * @param {string} [mode] The string representing the read preference mode.
 * @return {boolean} True if a mode is valid
 */
ReadPreference.prototype.isValid = function(mode) {
  const candidate = typeof mode === 'string' ? mode : this.mode;
  return ReadPreference.isValid(candidate);
};
|
||||
|
||||
// Modes that require the "slaveOk" wire-protocol bit to be set.
const needSlaveOk = ['primaryPreferred', 'secondary', 'secondaryPreferred', 'nearest'];

/**
 * Indicates that this readPreference needs the "slaveOk" bit when sent over the wire
 * @method
 * @return {boolean}
 * @see https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#op-query
 */
ReadPreference.prototype.slaveOk = function() {
  return needSlaveOk.some(mode => mode === this.mode);
};
|
||||
|
||||
/**
 * Are the two read preference equal (compared by mode only; tags are ignored)
 * @method
 * @param {ReadPreference} readPreference The read preference with which to check equality
 * @return {boolean} True if the two ReadPreferences are equivalent
 */
ReadPreference.prototype.equals = function(readPreference) {
  return this.mode === readPreference.mode;
};
|
||||
|
||||
/**
 * Return JSON representation
 * @method
 * @return {Object} A JSON representation of the ReadPreference
 */
ReadPreference.prototype.toJSON = function() {
  const out = { mode: this.mode };
  if (Array.isArray(this.tags)) {
    out.tags = this.tags;
  }
  if (this.maxStalenessSeconds) {
    out.maxStalenessSeconds = this.maxStalenessSeconds;
  }
  if (this.hedge) {
    out.hedge = this.hedge;
  }
  return out;
};
|
||||
|
||||
// Shared singleton instances, one per mode.
/**
 * Primary read preference
 * @member
 * @type {ReadPreference}
 */
ReadPreference.primary = new ReadPreference('primary');
/**
 * Primary Preferred read preference
 * @member
 * @type {ReadPreference}
 */
ReadPreference.primaryPreferred = new ReadPreference('primaryPreferred');
/**
 * Secondary read preference
 * @member
 * @type {ReadPreference}
 */
ReadPreference.secondary = new ReadPreference('secondary');
/**
 * Secondary Preferred read preference
 * @member
 * @type {ReadPreference}
 */
ReadPreference.secondaryPreferred = new ReadPreference('secondaryPreferred');
/**
 * Nearest read preference
 * @member
 * @type {ReadPreference}
 */
ReadPreference.nearest = new ReadPreference('nearest');

module.exports = ReadPreference;
|
||||
1559
node_modules/mongodb/lib/core/topologies/replset.js
generated
vendored
Normal file
1559
node_modules/mongodb/lib/core/topologies/replset.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1121
node_modules/mongodb/lib/core/topologies/replset_state.js
generated
vendored
Normal file
1121
node_modules/mongodb/lib/core/topologies/replset_state.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
990
node_modules/mongodb/lib/core/topologies/server.js
generated
vendored
Normal file
990
node_modules/mongodb/lib/core/topologies/server.js
generated
vendored
Normal file
@@ -0,0 +1,990 @@
|
||||
'use strict';
|
||||
|
||||
var inherits = require('util').inherits,
|
||||
f = require('util').format,
|
||||
EventEmitter = require('events').EventEmitter,
|
||||
ReadPreference = require('./read_preference'),
|
||||
Logger = require('../connection/logger'),
|
||||
debugOptions = require('../connection/utils').debugOptions,
|
||||
retrieveBSON = require('../connection/utils').retrieveBSON,
|
||||
Pool = require('../connection/pool'),
|
||||
MongoError = require('../error').MongoError,
|
||||
MongoNetworkError = require('../error').MongoNetworkError,
|
||||
wireProtocol = require('../wireprotocol'),
|
||||
CoreCursor = require('../cursor').CoreCursor,
|
||||
sdam = require('./shared'),
|
||||
createCompressionInfo = require('./shared').createCompressionInfo,
|
||||
resolveClusterTime = require('./shared').resolveClusterTime,
|
||||
SessionMixins = require('./shared').SessionMixins,
|
||||
relayEvents = require('../utils').relayEvents;
|
||||
|
||||
const collationNotSupported = require('../utils').collationNotSupported;
|
||||
const makeClientMetadata = require('../utils').makeClientMetadata;
|
||||
|
||||
// Used for filtering out fields for logging
var debugFields = [
  'reconnect',
  'reconnectTries',
  'reconnectInterval',
  'emitError',
  'cursorFactory',
  'host',
  'port',
  'size',
  'keepAlive',
  'keepAliveInitialDelay',
  'noDelay',
  'connectionTimeout',
  'checkServerIdentity',
  'socketTimeout',
  'ssl',
  'ca',
  'crl',
  'cert',
  'key',
  'rejectUnauthorized',
  'promoteLongs',
  'promoteValues',
  'promoteBuffers',
  'servername'
];
|
||||
|
||||
// Server instance id (monotonically increasing per-process counter)
var id = 0;
// Module-level registry, toggled via Server.enableServerAccounting
var serverAccounting = false;
var servers = {};
var BSON = retrieveBSON();
|
||||
|
||||
/**
 * Resolve the id used for topology-level events: the parent topology's id
 * when this server belongs to one, otherwise the server's own id.
 *
 * @param {Server} server
 * @return {number}
 */
function topologyId(server) {
  const parent = server.s.parent;
  return parent == null ? server.id : parent.id;
}
|
||||
|
||||
/**
|
||||
* Creates a new Server instance
|
||||
* @class
|
||||
* @param {boolean} [options.reconnect=true] Server will attempt to reconnect on loss of connection
|
||||
* @param {number} [options.reconnectTries=30] Server attempt to reconnect #times
|
||||
* @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries
|
||||
* @param {number} [options.monitoring=true] Enable the server state monitoring (calling ismaster at monitoringInterval)
|
||||
* @param {number} [options.monitoringInterval=5000] The interval of calling ismaster when monitoring is enabled.
|
||||
* @param {Cursor} [options.cursorFactory=Cursor] The cursor factory class used for all query cursors
|
||||
* @param {string} options.host The server host
|
||||
* @param {number} options.port The server port
|
||||
* @param {number} [options.size=5] Server connection pool size
|
||||
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
|
||||
* @param {number} [options.keepAliveInitialDelay=120000] Initial delay before TCP keep alive enabled
|
||||
* @param {boolean} [options.noDelay=true] TCP Connection no delay
|
||||
* @param {number} [options.connectionTimeout=30000] TCP Connection timeout setting
|
||||
* @param {number} [options.socketTimeout=360000] TCP Socket timeout setting
|
||||
* @param {boolean} [options.ssl=false] Use SSL for connection
|
||||
* @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function.
|
||||
* @param {Buffer} [options.ca] SSL Certificate store binary buffer
|
||||
* @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer
|
||||
* @param {Buffer} [options.cert] SSL Certificate binary buffer
|
||||
* @param {Buffer} [options.key] SSL Key file binary buffer
|
||||
* @param {string} [options.passphrase] SSL Certificate pass phrase
|
||||
* @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates
|
||||
* @param {string} [options.servername=null] String containing the server name requested via TLS SNI.
|
||||
* @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits
|
||||
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types.
|
||||
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
|
||||
* @param {string} [options.appname=null] Application name, passed in on ismaster call and logged in mongod server logs. Maximum size 128 bytes.
|
||||
* @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit.
|
||||
* @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology
|
||||
* @return {Server} A cursor instance
|
||||
* @fires Server#connect
|
||||
* @fires Server#close
|
||||
* @fires Server#error
|
||||
* @fires Server#timeout
|
||||
* @fires Server#parseError
|
||||
* @fires Server#reconnect
|
||||
* @fires Server#reconnectFailed
|
||||
* @fires Server#serverHeartbeatStarted
|
||||
* @fires Server#serverHeartbeatSucceeded
|
||||
* @fires Server#serverHeartbeatFailed
|
||||
* @fires Server#topologyOpening
|
||||
* @fires Server#topologyClosed
|
||||
* @fires Server#topologyDescriptionChanged
|
||||
* @property {string} type the topology type.
|
||||
* @property {string} parserType the parser type used (c++ or js).
|
||||
*/
|
||||
var Server = function(options) {
  options = options || {};

  // Add event listener
  EventEmitter.call(this);

  // Server instance id
  this.id = id++;

  // Internal state
  this.s = {
    // Options (client metadata is computed once here and merged in)
    options: Object.assign({ metadata: makeClientMetadata(options) }, options),
    // Logger
    logger: Logger('Server', options),
    // Factory overrides
    Cursor: options.cursorFactory || CoreCursor,
    // BSON instance
    bson:
      options.bson ||
      new BSON([
        BSON.Binary,
        BSON.Code,
        BSON.DBRef,
        BSON.Decimal128,
        BSON.Double,
        BSON.Int32,
        BSON.Long,
        BSON.Map,
        BSON.MaxKey,
        BSON.MinKey,
        BSON.ObjectId,
        BSON.BSONRegExp,
        BSON.Symbol,
        BSON.Timestamp
      ]),
    // Pool (created later, on connect)
    pool: null,
    // Disconnect handler
    disconnectHandler: options.disconnectHandler,
    // Monitor thread (keeps the connection alive); defaults to enabled
    monitoring: typeof options.monitoring === 'boolean' ? options.monitoring : true,
    // Is the server in a topology
    inTopology: !!options.parent,
    // Monitoring timeout (ms between ismaster pings)
    monitoringInterval:
      typeof options.monitoringInterval === 'number' ? options.monitoringInterval : 5000,
    compression: { compressors: createCompressionInfo(options) },
    // Optional parent topology
    parent: options.parent
  };

  // If this is a single deployment we need to track the clusterTime here
  if (!this.s.parent) {
    this.s.clusterTime = null;
  }

  // Current ismaster
  this.ismaster = null;
  // Current ping time
  this.lastIsMasterMS = -1;
  // The monitoringProcessId
  this.monitoringProcessId = null;
  // Initial connection
  this.initialConnect = true;
  // Default type
  this._type = 'server';

  // Max Staleness values
  // last time we updated the ismaster state
  this.lastUpdateTime = 0;
  // Last write time
  this.lastWriteDate = 0;
  // Staleness
  this.staleness = 0;
};
|
||||
|
||||
inherits(Server, EventEmitter);
// Mix in the session helpers shared across topology types.
Object.assign(Server.prototype, SessionMixins);
|
||||
|
||||
// Topology type reported by this instance ('server', or 'mongos' once an
// isdbgrid handshake is observed — see the connect event handler).
Object.defineProperty(Server.prototype, 'type', {
  enumerable: true,
  get: function() {
    return this._type;
  }
});

// Which BSON parser implementation is active.
Object.defineProperty(Server.prototype, 'parserType', {
  enumerable: true,
  get: function() {
    return BSON.native ? 'c++' : 'js';
  }
});

// Session timeout advertised in the server's ismaster response, or null when
// unknown (e.g. not yet connected).
Object.defineProperty(Server.prototype, 'logicalSessionTimeoutMinutes', {
  enumerable: true,
  get: function() {
    if (!this.ismaster) return null;
    return this.ismaster.logicalSessionTimeoutMinutes || null;
  }
});

// Client metadata computed in the constructor and sent during the handshake.
Object.defineProperty(Server.prototype, 'clientMetadata', {
  enumerable: true,
  get: function() {
    return this.s.options.metadata;
  }
});

// In single server deployments we track the clusterTime directly on the topology, however
// in Mongos and ReplSet deployments we instead need to delegate the clusterTime up to the
// tracking objects so we can ensure we are gossiping the maximum time received from the
// server.
Object.defineProperty(Server.prototype, 'clusterTime', {
  enumerable: true,
  set: function(clusterTime) {
    const settings = this.s.parent ? this.s.parent : this.s;
    resolveClusterTime(settings, clusterTime);
  },
  get: function() {
    const settings = this.s.parent ? this.s.parent : this.s;
    return settings.clusterTime || null;
  }
});
|
||||
|
||||
// Enable tracking of Server instances in the module-level `servers` map
// (cleared on enable).
Server.enableServerAccounting = function() {
  serverAccounting = true;
  servers = {};
};

// Disable tracking of Server instances.
Server.disableServerAccounting = function() {
  serverAccounting = false;
};

// Return the current registry of tracked servers.
Server.servers = function() {
  return servers;
};

// "host:port" identifier for this server.
Object.defineProperty(Server.prototype, 'name', {
  enumerable: true,
  get: function() {
    return this.s.options.host + ':' + this.s.options.port;
  }
});
|
||||
|
||||
/**
 * Handle an operation issued while the pool is not connected. Returns `true`
 * when the operation was consumed here (buffered for replay, or failed with
 * an error), and `undefined` when the pool is connected and the caller
 * should proceed normally.
 *
 * @param {Server} self The server the operation was issued against
 * @param {string} type Operation type (e.g. 'insert', 'cursor')
 * @param {string} ns Fully qualified namespace
 * @param {object} cmd The command document
 * @param {object} options Operation options
 * @param {function} callback Operation callback
 */
function disconnectHandler(self, type, ns, cmd, options, callback) {
  if (self.s.pool.isConnected()) {
    // Connected: nothing to intercept.
    return;
  }

  // Topology is not connected: save the call in the provided store to be
  // executed at some point when the handler deems it's reconnected.
  const canBuffer =
    self.s.options.reconnect && self.s.disconnectHandler != null && !options.monitoring;
  if (canBuffer) {
    self.s.disconnectHandler.add(type, ns, cmd, options, callback);
    return true;
  }

  // No buffering possible: fail the operation immediately.
  callback(new MongoError(f('no connection available to server %s', self.name)));
  return true;
}
|
||||
|
||||
/**
 * Build the monitoring tick for a server: a closure (suitable for setTimeout)
 * that performs one `ismaster` ping, records the round-trip time, and
 * reschedules itself until the pool is destroyed.
 *
 * @param {Server} self The server to monitor
 * @return {function} The monitoring closure
 */
function monitoringProcess(self) {
  return function() {
    // Pool was destroyed do not continue process
    if (self.s.pool.isDestroyed()) return;
    // Emit monitoring Process event
    self.emit('monitoring', self);
    // Perform ismaster call
    // Get start time
    var start = new Date().getTime();

    // Execute the ismaster query
    self.command(
      'admin.$cmd',
      { ismaster: true },
      {
        // Reuse connectionTimeout as the ping's socket timeout; default 2s
        socketTimeout:
          typeof self.s.options.connectionTimeout !== 'number'
            ? 2000
            : self.s.options.connectionTimeout,
        monitoring: true
      },
      (err, result) => {
        // Set initial lastIsMasterMS (round-trip time of the ping)
        self.lastIsMasterMS = new Date().getTime() - start;
        if (self.s.pool.isDestroyed()) return;
        // Update the ismaster view if we have a result (errors are ignored;
        // the next tick will try again)
        if (result) {
          self.ismaster = result.result;
        }
        // Re-schedule the monitoring process
        self.monitoringProcessId = setTimeout(monitoringProcess(self), self.s.monitoringInterval);
      }
    );
  };
}
|
||||
|
||||
var eventHandler = function(self, event) {
|
||||
return function(err, conn) {
|
||||
// Log information of received information if in info mode
|
||||
if (self.s.logger.isInfo()) {
|
||||
var object = err instanceof MongoError ? JSON.stringify(err) : {};
|
||||
self.s.logger.info(
|
||||
f('server %s fired event %s out with message %s', self.name, event, object)
|
||||
);
|
||||
}
|
||||
|
||||
// Handle connect event
|
||||
if (event === 'connect') {
|
||||
self.initialConnect = false;
|
||||
self.ismaster = conn.ismaster;
|
||||
self.lastIsMasterMS = conn.lastIsMasterMS;
|
||||
if (conn.agreedCompressor) {
|
||||
self.s.pool.options.agreedCompressor = conn.agreedCompressor;
|
||||
}
|
||||
|
||||
if (conn.zlibCompressionLevel) {
|
||||
self.s.pool.options.zlibCompressionLevel = conn.zlibCompressionLevel;
|
||||
}
|
||||
|
||||
if (conn.ismaster.$clusterTime) {
|
||||
const $clusterTime = conn.ismaster.$clusterTime;
|
||||
self.clusterTime = $clusterTime;
|
||||
}
|
||||
|
||||
// It's a proxy change the type so
|
||||
// the wireprotocol will send $readPreference
|
||||
if (self.ismaster.msg === 'isdbgrid') {
|
||||
self._type = 'mongos';
|
||||
}
|
||||
|
||||
// Have we defined self monitoring
|
||||
if (self.s.monitoring) {
|
||||
self.monitoringProcessId = setTimeout(monitoringProcess(self), self.s.monitoringInterval);
|
||||
}
|
||||
|
||||
// Emit server description changed if something listening
|
||||
sdam.emitServerDescriptionChanged(self, {
|
||||
address: self.name,
|
||||
arbiters: [],
|
||||
hosts: [],
|
||||
passives: [],
|
||||
type: sdam.getTopologyType(self)
|
||||
});
|
||||
|
||||
if (!self.s.inTopology) {
|
||||
// Emit topology description changed if something listening
|
||||
sdam.emitTopologyDescriptionChanged(self, {
|
||||
topologyType: 'Single',
|
||||
servers: [
|
||||
{
|
||||
address: self.name,
|
||||
arbiters: [],
|
||||
hosts: [],
|
||||
passives: [],
|
||||
type: sdam.getTopologyType(self)
|
||||
}
|
||||
]
|
||||
});
|
||||
}
|
||||
|
||||
// Log the ismaster if available
|
||||
if (self.s.logger.isInfo()) {
|
||||
self.s.logger.info(
|
||||
f('server %s connected with ismaster [%s]', self.name, JSON.stringify(self.ismaster))
|
||||
);
|
||||
}
|
||||
|
||||
// Emit connect
|
||||
self.emit('connect', self);
|
||||
} else if (
|
||||
event === 'error' ||
|
||||
event === 'parseError' ||
|
||||
event === 'close' ||
|
||||
event === 'timeout' ||
|
||||
event === 'reconnect' ||
|
||||
event === 'attemptReconnect' ||
|
||||
'reconnectFailed'
|
||||
) {
|
||||
// Remove server instance from accounting
|
||||
if (
|
||||
serverAccounting &&
|
||||
['close', 'timeout', 'error', 'parseError', 'reconnectFailed'].indexOf(event) !== -1
|
||||
) {
|
||||
// Emit toplogy opening event if not in topology
|
||||
if (!self.s.inTopology) {
|
||||
self.emit('topologyOpening', { topologyId: self.id });
|
||||
}
|
||||
|
||||
delete servers[self.id];
|
||||
}
|
||||
|
||||
if (event === 'close') {
|
||||
// Closing emits a server description changed event going to unknown.
|
||||
sdam.emitServerDescriptionChanged(self, {
|
||||
address: self.name,
|
||||
arbiters: [],
|
||||
hosts: [],
|
||||
passives: [],
|
||||
type: 'Unknown'
|
||||
});
|
||||
}
|
||||
|
||||
// Reconnect failed return error
|
||||
if (event === 'reconnectFailed') {
|
||||
self.emit('reconnectFailed', err);
|
||||
// Emit error if any listeners
|
||||
if (self.listeners('error').length > 0) {
|
||||
self.emit('error', err);
|
||||
}
|
||||
// Terminate
|
||||
return;
|
||||
}
|
||||
|
||||
// On first connect fail
|
||||
if (
|
||||
['disconnected', 'connecting'].indexOf(self.s.pool.state) !== -1 &&
|
||||
self.initialConnect &&
|
||||
['close', 'timeout', 'error', 'parseError'].indexOf(event) !== -1
|
||||
) {
|
||||
self.initialConnect = false;
|
||||
return self.emit(
|
||||
'error',
|
||||
new MongoNetworkError(
|
||||
f('failed to connect to server [%s] on first connect [%s]', self.name, err)
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
// Reconnect event, emit the server
|
||||
if (event === 'reconnect') {
|
||||
// Reconnecting emits a server description changed event going from unknown to the
|
||||
// current server type.
|
||||
sdam.emitServerDescriptionChanged(self, {
|
||||
address: self.name,
|
||||
arbiters: [],
|
||||
hosts: [],
|
||||
passives: [],
|
||||
type: sdam.getTopologyType(self)
|
||||
});
|
||||
return self.emit(event, self);
|
||||
}
|
||||
|
||||
// Emit the event
|
||||
self.emit(event, err);
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
/**
 * Initiate the server connection: create a fresh pool, wire up pool and
 * command-monitoring events, emit SDAM opening events, then connect the pool.
 * @param {object} [options] Optional settings merged into the pool options
 * @throws {MongoError} when the existing pool is neither disconnected nor destroyed
 */
Server.prototype.connect = function(options) {
  const self = this;
  options = options || {};

  // Register this instance for global server accounting
  if (serverAccounting) servers[this.id] = this;

  // Do not allow connect to be called on anything that's not disconnected
  if (self.s.pool && !self.s.pool.isDisconnected() && !self.s.pool.isDestroyed()) {
    throw new MongoError(f('server instance in invalid state %s', self.s.pool.state));
  }

  // Create a pool (NOTE: Object.assign intentionally mutates self.s.options
  // in place, preserving the original behavior)
  self.s.pool = new Pool(this, Object.assign(self.s.options, options, { bson: this.s.bson }));

  // Route every pool lifecycle event through the shared event handler
  ['close', 'error', 'timeout', 'parseError', 'connect', 'reconnect', 'reconnectFailed'].forEach(
    function(event) {
      self.s.pool.on(event, eventHandler(self, event));
    }
  );

  // Relay command monitoring events from the pool to this server
  relayEvents(self.s.pool, self, ['commandStarted', 'commandSucceeded', 'commandFailed']);

  // Emit topology opening event when this server is used standalone
  if (!self.s.inTopology) {
    this.emit('topologyOpening', { topologyId: topologyId(self) });
  }

  // Emit opening server event
  self.emit('serverOpening', { topologyId: topologyId(self), address: self.name });

  self.s.pool.connect();
};
|
||||
|
||||
/**
 * Authenticate the topology.
 *
 * NOTE(review): authentication appears to be handled by the pool for this
 * topology; this method only satisfies the topology interface and reports
 * success immediately — confirm against callers.
 * @method
 * @param {MongoCredentials} credentials The credentials for authentication we are using
 * @param {authResultCallback} callback A callback function
 */
Server.prototype.auth = function(credentials, callback) {
  if (typeof callback !== 'function') return;
  callback(null, null);
};
|
||||
|
||||
/**
 * Get the server description derived from the last ismaster result.
 * @method
 * @return {object} description with type/address plus any replica-set fields
 */
Server.prototype.getDescription = function() {
  const ismaster = this.ismaster || {};
  const description = {
    type: sdam.getTopologyType(this),
    address: this.name
  };

  // Copy optional replica-set fields when the ismaster result provides them
  ['hosts', 'arbiters', 'passives', 'setName'].forEach(function(field) {
    if (ismaster[field]) description[field] = ismaster[field];
  });

  return description;
};
|
||||
|
||||
/**
 * Returns the last known ismaster document for this server
 * (cached on the `connect` event; may be undefined before the first handshake).
 * @method
 * @return {object}
 */
Server.prototype.lastIsMaster = function() {
  return this.ismaster;
};
|
||||
|
||||
/**
 * Unref all connections belonging to this server so they do not keep
 * the Node.js event loop alive.
 * @method
 */
Server.prototype.unref = function() {
  this.s.pool.unref();
};
|
||||
|
||||
/**
 * Figure out if the server is connected.
 * @method
 * @return {boolean} true only when a pool exists and reports itself connected
 */
Server.prototype.isConnected = function() {
  return this.s.pool ? this.s.pool.isConnected() : false;
};
|
||||
|
||||
/**
 * Figure out if the server instance was destroyed by calling destroy.
 * @method
 * @return {boolean} true only when a pool exists and reports itself destroyed
 */
Server.prototype.isDestroyed = function() {
  return this.s.pool ? this.s.pool.isDestroyed() : false;
};
|
||||
|
||||
/**
 * Validate that the server is in a state that can accept a write.
 * @param {Server} self The server instance to validate
 * @return {MongoError|undefined} an error describing the problem, or undefined when valid
 */
function basicWriteValidations(self) {
  if (!self.s.pool) {
    return new MongoError('server instance is not connected');
  }

  if (self.s.pool.isDestroyed()) {
    return new MongoError('server instance pool was destroyed');
  }
}
|
||||
|
||||
/**
 * Validate that the server can accept a read.
 *
 * Fix: the MongoError produced by basicWriteValidations is now returned.
 * Previously it was silently discarded, so callers checking the return value
 * (e.g. Server.prototype.command) never saw "not connected" / "pool
 * destroyed" errors through this path.
 * @param {Server} self The server instance to validate
 * @param {object} options Options possibly carrying a readPreference
 * @return {MongoError|undefined} validation error, or undefined when valid
 * @throws {Error} when options.readPreference is not a ReadPreference instance
 */
function basicReadValidations(self, options) {
  const result = basicWriteValidations(self, options);
  if (result) return result;

  if (options.readPreference && !(options.readPreference instanceof ReadPreference)) {
    throw new Error('readPreference must be an instance of ReadPreference');
  }
}
|
||||
|
||||
/**
 * Execute a command
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {object} cmd The command hash
 * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
 * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
 * @param {Boolean} [options.checkKeys=false] Specify if the bson parser should validate keys.
 * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
 * @param {Boolean} [options.fullResult=false] Return the full envelope instead of just the result document.
 * @param {ClientSession} [options.session=null] Session to use for the operation
 * @param {opResultCallback} callback A callback function
 */
Server.prototype.command = function(ns, cmd, options, callback) {
  var self = this;
  // Support the (ns, cmd, callback) call shape (the third assignment is
  // redundant but preserved as-is)
  if (typeof options === 'function') {
    (callback = options), (options = {}), (options = options || {});
  }

  // Fail early when the pool is missing/destroyed or readPreference is invalid
  var result = basicReadValidations(self, options);
  if (result) return callback(result);

  // Clone the options; wireProtocolCommand=false marks this as a user command
  options = Object.assign({}, options, { wireProtocolCommand: false });

  // Debug log
  if (self.s.logger.isDebug())
    self.s.logger.debug(
      f(
        'executing command [%s] against %s',
        JSON.stringify({
          ns: ns,
          cmd: cmd,
          options: debugOptions(debugFields, options)
        }),
        self.name
      )
    );

  // If we are not connected or have a disconnectHandler specified, the
  // handler may buffer or reject the operation instead of executing it now
  if (disconnectHandler(self, 'command', ns, cmd, options, callback)) return;

  // error if collation not supported
  if (collationNotSupported(this, cmd)) {
    return callback(new MongoError(`server ${this.name} does not support collation`));
  }

  wireProtocol.command(self, ns, cmd, options, callback);
};
|
||||
|
||||
/**
 * Execute a query against the server (delegates to the wire protocol layer).
 *
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {object} cmd The command document for the query
 * @param {object} cursorState State data associated with the cursor calling this method
 * @param {object} options Optional settings
 * @param {function} callback
 */
Server.prototype.query = function(ns, cmd, cursorState, options, callback) {
  wireProtocol.query(this, ns, cmd, cursorState, options, callback);
};
|
||||
|
||||
/**
 * Execute a `getMore` against the server (delegates to the wire protocol layer).
 *
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {object} cursorState State data associated with the cursor calling this method
 * @param {number} batchSize Number of documents to request in this batch
 * @param {object} options Optional settings
 * @param {function} callback
 */
Server.prototype.getMore = function(ns, cursorState, batchSize, options, callback) {
  wireProtocol.getMore(this, ns, cursorState, batchSize, options, callback);
};
|
||||
|
||||
/**
 * Execute a `killCursors` command against the server (delegates to the wire
 * protocol layer).
 *
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {object} cursorState State data associated with the cursor calling this method
 * @param {function} callback
 */
Server.prototype.killCursors = function(ns, cursorState, callback) {
  wireProtocol.killCursors(this, ns, cursorState, callback);
};
|
||||
|
||||
/**
 * Insert one or more documents
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {array} ops An array of documents to insert
 * @param {boolean} [options.ordered=true] Execute in order or out of order
 * @param {object} [options.writeConcern={}] Write concern for the operation
 * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
 * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
 * @param {ClientSession} [options.session=null] Session to use for the operation
 * @param {opResultCallback} callback A callback function
 */
Server.prototype.insert = function(ns, ops, options, callback) {
  const self = this;
  // Support the (ns, ops, callback) call shape
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }

  // Fail early when the pool is missing or destroyed
  const validationError = basicWriteValidations(self, options);
  if (validationError) return callback(validationError);

  // Buffer the operation if disconnected and a disconnect handler exists
  if (disconnectHandler(self, 'insert', ns, ops, options, callback)) return;

  // Normalize a single document into an array
  const documents = Array.isArray(ops) ? ops : [ops];

  // Execute write
  return wireProtocol.insert(self, ns, documents, options, callback);
};
|
||||
|
||||
/**
 * Perform one or more update operations
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {array} ops An array of updates
 * @param {boolean} [options.ordered=true] Execute in order or out of order
 * @param {object} [options.writeConcern={}] Write concern for the operation
 * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
 * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
 * @param {ClientSession} [options.session=null] Session to use for the operation
 * @param {opResultCallback} callback A callback function
 */
Server.prototype.update = function(ns, ops, options, callback) {
  const self = this;
  // Support the (ns, ops, callback) call shape
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }

  // Fail early when the pool is missing or destroyed
  const validationError = basicWriteValidations(self, options);
  if (validationError) return callback(validationError);

  // Buffer the operation if disconnected and a disconnect handler exists
  if (disconnectHandler(self, 'update', ns, ops, options, callback)) return;

  // Fail fast when a collation is requested but unsupported by this server
  if (collationNotSupported(this, options)) {
    return callback(new MongoError(`server ${this.name} does not support collation`));
  }

  // Normalize a single update into an array
  const operations = Array.isArray(ops) ? ops : [ops];

  // Execute write
  return wireProtocol.update(self, ns, operations, options, callback);
};
|
||||
|
||||
/**
 * Perform one or more remove operations
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {array} ops An array of removes
 * @param {boolean} [options.ordered=true] Execute in order or out of order
 * @param {object} [options.writeConcern={}] Write concern for the operation
 * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
 * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
 * @param {ClientSession} [options.session=null] Session to use for the operation
 * @param {opResultCallback} callback A callback function
 */
Server.prototype.remove = function(ns, ops, options, callback) {
  const self = this;
  // Support the (ns, ops, callback) call shape
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }

  // Fail early when the pool is missing or destroyed
  const validationError = basicWriteValidations(self, options);
  if (validationError) return callback(validationError);

  // Buffer the operation if disconnected and a disconnect handler exists
  if (disconnectHandler(self, 'remove', ns, ops, options, callback)) return;

  // Fail fast when a collation is requested but unsupported by this server
  if (collationNotSupported(this, options)) {
    return callback(new MongoError(`server ${this.name} does not support collation`));
  }

  // Normalize a single remove into an array
  const operations = Array.isArray(ops) ? ops : [ops];

  // Execute write
  return wireProtocol.remove(self, ns, operations, options, callback);
};
|
||||
|
||||
/**
 * Get a new cursor
 * @method
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {object|Long} cmd Can be either a command returning a cursor or a cursorId
 * @param {object} [options] Options for the cursor
 * @param {object} [options.batchSize=0] Batchsize for the operation
 * @param {array} [options.documents=[]] Initial documents list for cursor
 * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
 * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
 * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
 * @param {ClientSession} [options.session=null] Session to use for the operation
 * @param {object} [options.topology] The internal topology of the created cursor
 * @returns {Cursor}
 */
Server.prototype.cursor = function(ns, cmd, options) {
  options = options || {};

  // Allow the caller to substitute both the backing topology and cursor class
  const topology = options.topology || this;
  const CursorClass = options.cursorFactory || this.s.Cursor;

  return new CursorClass(topology, ns, cmd, options);
};
|
||||
|
||||
/**
 * Compare two server instances (or a "host:port" address string) by address,
 * case-insensitively.
 * @method
 * @param {Server|string} server Server (or address string) to compare equality against
 * @return {boolean}
 */
Server.prototype.equals = function(server) {
  if (typeof server === 'string') {
    return this.name.toLowerCase() === server.toLowerCase();
  }

  return server.name ? this.name.toLowerCase() === server.name.toLowerCase() : false;
};
|
||||
|
||||
/**
 * All raw connections currently held by this server's pool.
 * @method
 * @return {Connection[]}
 */
Server.prototype.connections = function() {
  return this.s.pool.allConnections();
};
|
||||
|
||||
/**
 * Selects a server; for a single-server topology the answer is always `this`.
 * Supports the (callback), (selector, callback) and
 * (selector, options, callback) call shapes.
 * @method
 * @param {function} [selector] Unused
 * @param {object} [options] Unused (readPreference/session accepted but ignored)
 * @param {function} callback Invoked with (null, this)
 */
Server.prototype.selectServer = function(selector, options, callback) {
  if (typeof selector === 'function' && typeof callback === 'undefined') {
    callback = selector;
    selector = undefined;
    options = {};
  }

  if (typeof options === 'function') {
    callback = options;
    options = selector;
    selector = undefined;
  }

  callback(null, this);
};
|
||||
|
||||
// Pool events that destroy() detaches before tearing down the pool
var listeners = ['close', 'error', 'timeout', 'parseError', 'connect'];
|
||||
|
||||
/**
 * Destroy the server connection. Idempotent: subsequent calls just invoke
 * the callback. Detaches pool listeners, emits optional lifecycle/SDAM
 * events, then destroys the pool.
 * @method
 * @param {boolean} [options.emitClose=false] Emit close event on destroy
 * @param {boolean} [options.emitDestroy=false] Emit destroy event on destroy
 * @param {boolean} [options.force=false] Force destroy the pool
 * @param {function} [callback] Invoked when the pool finishes destroying
 */
Server.prototype.destroy = function(options, callback) {
  // Already destroyed: report success without re-running teardown
  if (this._destroyed) {
    if (typeof callback === 'function') callback(null, null);
    return;
  }

  // Support the (callback) call shape
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }

  options = options || {};
  var self = this;

  // Remove this instance from global server accounting
  if (serverAccounting) delete servers[this.id];

  // Destroy the monitoring process if any
  if (this.monitoringProcessId) {
    clearTimeout(this.monitoringProcessId);
  }

  // No pool was ever created: nothing else to tear down
  if (!self.s.pool) {
    this._destroyed = true;
    if (typeof callback === 'function') callback(null, null);
    return;
  }

  // Emit close event if requested
  if (options.emitClose) {
    self.emit('close', self);
  }

  // Emit destroy event if requested
  if (options.emitDestroy) {
    self.emit('destroy', self);
  }

  // Remove all pool listeners so in-flight pool events no longer reach us
  listeners.forEach(function(event) {
    self.s.pool.removeAllListeners(event);
  });

  // Emit server closed event if anything is listening
  if (self.listeners('serverClosed').length > 0)
    self.emit('serverClosed', { topologyId: topologyId(self), address: self.name });

  // Emit topology closed event when used standalone and something listens
  if (self.listeners('topologyClosed').length > 0 && !self.s.inTopology) {
    self.emit('topologyClosed', { topologyId: topologyId(self) });
  }

  if (self.s.logger.isDebug()) {
    self.s.logger.debug(f('destroy called on server %s', self.name));
  }

  // Destroy the pool; mark destroyed immediately (callback fires later)
  this.s.pool.destroy(options.force, callback);
  this._destroyed = true;
};
|
||||
|
||||
/**
|
||||
* A server connect event, used to verify that the connection is up and running
|
||||
*
|
||||
* @event Server#connect
|
||||
* @type {Server}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A server reconnect event, used to verify that the server topology has reconnected
|
||||
*
|
||||
* @event Server#reconnect
|
||||
* @type {Server}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A server opening SDAM monitoring event
|
||||
*
|
||||
* @event Server#serverOpening
|
||||
* @type {object}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A server closed SDAM monitoring event
|
||||
*
|
||||
* @event Server#serverClosed
|
||||
* @type {object}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A server description SDAM change monitoring event
|
||||
*
|
||||
* @event Server#serverDescriptionChanged
|
||||
* @type {object}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A topology open SDAM event
|
||||
*
|
||||
* @event Server#topologyOpening
|
||||
* @type {object}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A topology closed SDAM event
|
||||
*
|
||||
* @event Server#topologyClosed
|
||||
* @type {object}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A topology structure SDAM change event
|
||||
*
|
||||
* @event Server#topologyDescriptionChanged
|
||||
* @type {object}
|
||||
*/
|
||||
|
||||
/**
|
||||
* Server reconnect failed
|
||||
*
|
||||
* @event Server#reconnectFailed
|
||||
* @type {Error}
|
||||
*/
|
||||
|
||||
/**
|
||||
* Server connection pool closed
|
||||
*
|
||||
* @event Server#close
|
||||
* @type {object}
|
||||
*/
|
||||
|
||||
/**
|
||||
* Server connection pool caused an error
|
||||
*
|
||||
* @event Server#error
|
||||
* @type {Error}
|
||||
*/
|
||||
|
||||
/**
|
||||
 * Emitted when destroy is called on the server
|
||||
*
|
||||
* @event Server#destroy
|
||||
* @type {Server}
|
||||
*/
|
||||
|
||||
module.exports = Server;
|
||||
456
node_modules/mongodb/lib/core/topologies/shared.js
generated
vendored
Normal file
456
node_modules/mongodb/lib/core/topologies/shared.js
generated
vendored
Normal file
@@ -0,0 +1,456 @@
|
||||
'use strict';
|
||||
const ReadPreference = require('./read_preference');
|
||||
const TopologyType = require('../sdam/common').TopologyType;
|
||||
const MongoError = require('../error').MongoError;
|
||||
const isRetryableWriteError = require('../error').isRetryableWriteError;
|
||||
const maxWireVersion = require('../utils').maxWireVersion;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const MMAPv1_RETRY_WRITES_ERROR_CODE = 20;
|
||||
|
||||
/**
 * Emit an SDAM event only when at least one listener is attached, avoiding
 * the cost of emitting unobserved events.
 * @method
 * @param {EventEmitter} self The emitter to fire the event on
 * @param {string} event The event name
 * @param {object} description The event payload
 */
function emitSDAMEvent(self, event, description) {
  const observed = self.listeners(event).length > 0;
  if (observed) {
    self.emit(event, description);
  }
}
|
||||
|
||||
/**
 * Extract and validate the list of requested wire-protocol compressors.
 * @param {object} options Connection options, possibly carrying options.compression.compressors
 * @return {string[]} the validated compressor names (empty when none requested)
 * @throws {Error} when a compressor other than 'snappy' or 'zlib' is requested
 */
function createCompressionInfo(options) {
  const compression = options.compression;
  if (!compression || !compression.compressors) {
    return [];
  }

  // Check that all supplied compressors are valid
  for (const compressor of compression.compressors) {
    if (compressor !== 'snappy' && compressor !== 'zlib') {
      throw new Error('compressors must be at least one of snappy or zlib');
    }
  }

  return compression.compressors;
}
|
||||
|
||||
/**
 * Deep-copy a plain value via a JSON round trip.
 * NOTE(review): JSON cloning drops undefined/function values and stringifies
 * Dates — presumably only used on plain option documents; verify callers.
 * @param {object} object The value to copy
 * @return {object} a structurally independent copy
 */
function clone(object) {
  const serialized = JSON.stringify(object);
  return JSON.parse(serialized);
}
|
||||
|
||||
/**
 * Return the cached server description, lazily seeding it with the
 * 'Unknown' description for this address on first use.
 * @param {object} self The server instance
 * @return {object} the previous server description
 */
var getPreviousDescription = function(self) {
  var cached = self.s.serverDescription;
  if (cached) return cached;

  // First call: initialize the cache to an 'Unknown' description
  self.s.serverDescription = {
    address: self.name,
    arbiters: [],
    hosts: [],
    passives: [],
    type: 'Unknown'
  };

  return self.s.serverDescription;
};
|
||||
|
||||
/**
 * Emit 'serverDescriptionChanged' (when observed) with the previous and new
 * descriptions, then cache the new description for the next transition.
 * @param {object} self The server instance
 * @param {object} description The new server description
 */
var emitServerDescriptionChanged = function(self, description) {
  if (self.listeners('serverDescriptionChanged').length === 0) return;

  // Emit the server description changed event
  self.emit('serverDescriptionChanged', {
    topologyId: self.s.topologyId !== -1 ? self.s.topologyId : self.id,
    address: self.name,
    previousDescription: getPreviousDescription(self),
    newDescription: description
  });

  self.s.serverDescription = description;
};
|
||||
|
||||
/**
 * Return the cached topology description, lazily seeding it with an
 * 'Unknown' single-server topology on first use.
 * @param {object} self The server instance
 * @return {object} the previous topology description
 */
var getPreviousTopologyDescription = function(self) {
  var cached = self.s.topologyDescription;
  if (cached) return cached;

  // First call: initialize the cache to an 'Unknown' topology
  self.s.topologyDescription = {
    topologyType: 'Unknown',
    servers: [
      {
        address: self.name,
        arbiters: [],
        hosts: [],
        passives: [],
        type: 'Unknown'
      }
    ]
  };

  return self.s.topologyDescription;
};
|
||||
|
||||
/**
 * Emit 'topologyDescriptionChanged' (when observed) with the previous and new
 * topology descriptions, then cache the new description.
 *
 * Fix: the new description is now stored in self.s.topologyDescription.
 * Previously it was written to self.s.serverDescription, which both clobbered
 * the cached *server* description and left getPreviousTopologyDescription()
 * returning a stale default forever.
 * @param {object} self The server instance
 * @param {object} description The new topology description
 */
var emitTopologyDescriptionChanged = function(self, description) {
  if (self.listeners('topologyDescriptionChanged').length > 0) {
    // Emit the topology description changed event
    self.emit('topologyDescriptionChanged', {
      topologyId: self.s.topologyId !== -1 ? self.s.topologyId : self.id,
      address: self.name,
      previousDescription: getPreviousTopologyDescription(self),
      newDescription: description
    });

    self.s.topologyDescription = description;
  }
};
|
||||
|
||||
/**
 * Determine whether a new ismaster result implies a different topology type
 * than the current one.
 * @param {object} self The server instance
 * @param {object} currentIsmaster The previously cached ismaster document
 * @param {object} ismaster The freshly received ismaster document
 * @return {boolean} true when the derived topology type changed
 */
var changedIsMaster = function(self, currentIsmaster, ismaster) {
  return getTopologyType(self, currentIsmaster) !== getTopologyType(self, ismaster);
};
|
||||
|
||||
/**
 * Classify a server from its ismaster document.
 * @param {object} self The server instance (self.ismaster used when no document given)
 * @param {object} [ismaster] An ismaster document to classify
 * @return {string} one of 'Mongos', 'Standalone', 'RSPrimary', 'RSSecondary',
 *   'RSArbiter', 'Unknown'
 */
var getTopologyType = function(self, ismaster) {
  var doc = ismaster || self.ismaster;
  if (!doc) return 'Unknown';

  if (doc.ismaster) {
    if (doc.msg === 'isdbgrid') return 'Mongos';
    return doc.hosts ? 'RSPrimary' : 'Standalone';
  }

  if (doc.secondary) return 'RSSecondary';
  if (doc.arbiterOnly) return 'RSArbiter';
  return 'Unknown';
};
|
||||
|
||||
/**
 * Build a monitoring closure that issues an `ismaster` heartbeat against the
 * server, emits SDAM heartbeat events, updates the cached ismaster view and
 * reschedules itself while the server is alive.
 *
 * Fix: latencyMS is now computed once, before branching on the command
 * result, so `serverHeartbeatFailed` reports a real duration. Previously it
 * was declared inside the success branch, so the failure event always carried
 * `durationMS: undefined` (var hoisting).
 * @param {object} self The server to monitor
 * @return {function} monitoring function accepting an optional callback
 */
var inquireServerState = function(self) {
  return function(callback) {
    if (self.s.state === 'destroyed') return;

    // Record response time
    var start = new Date().getTime();

    // emitSDAMEvent
    emitSDAMEvent(self, 'serverHeartbeatStarted', { connectionId: self.name });

    // Attempt to execute ismaster command
    self.command('admin.$cmd', { ismaster: true }, { monitoring: true }, function(err, r) {
      // Time from heartbeat start to response — valid for success and failure
      var latencyMS = new Date().getTime() - start;

      if (!err) {
        // Legacy event sender
        self.emit('ismaster', r, self);

        // Server heart beat event
        emitSDAMEvent(self, 'serverHeartbeatSucceeded', {
          durationMS: latencyMS,
          reply: r.result,
          connectionId: self.name
        });

        // Did the server change
        if (changedIsMaster(self, self.s.ismaster, r.result)) {
          // Emit server description changed if something listening
          emitServerDescriptionChanged(self, {
            address: self.name,
            arbiters: [],
            hosts: [],
            passives: [],
            type: !self.s.inTopology ? 'Standalone' : getTopologyType(self)
          });
        }

        // Update ismaster view
        self.s.ismaster = r.result;

        // Set server response time
        self.s.isMasterLatencyMS = latencyMS;
      } else {
        emitSDAMEvent(self, 'serverHeartbeatFailed', {
          durationMS: latencyMS,
          failure: err,
          connectionId: self.name
        });
      }

      // Performing an ismaster monitoring callback operation
      if (typeof callback === 'function') {
        return callback(err, r);
      }

      // Perform another sweep
      self.s.inquireServerStateTimeout = setTimeout(inquireServerState(self), self.s.haInterval);
    });
  };
};
|
||||
|
||||
/**
 * Shallow-copy an options object via for...in (intentionally also picks up
 * inherited enumerable properties, matching the original behavior).
 * @param {object} options The options to copy
 * @return {object} a new object carrying the same key/value pairs
 */
var cloneOptions = function(options) {
  var copy = {};
  for (var key in options) {
    copy[key] = options[key];
  }
  return copy;
};
|
||||
|
||||
/**
 * A restartable wrapper around setInterval.
 * @param {function} fn Function invoked on every tick
 * @param {number} time Tick period in milliseconds
 */
function Interval(fn, time) {
  var handle = false;

  // Begin ticking; no-op when already running. Returns `this` for chaining.
  this.start = function() {
    if (!this.isRunning()) {
      handle = setInterval(fn, time);
    }

    return this;
  };

  // Stop ticking and clear state. Returns `this` for chaining.
  this.stop = function() {
    clearInterval(handle);
    handle = false;
    return this;
  };

  // True while a timer handle is held.
  this.isRunning = function() {
    return handle !== false;
  };
}
|
||||
|
||||
/**
 * A restartable wrapper around setTimeout whose callback only fires while the
 * timer is still considered running (stop() prevents a late fire).
 * @param {function} fn Function invoked when the timeout elapses
 * @param {number} time Delay in milliseconds
 */
function Timeout(fn, time) {
  var handle = false;

  // Guarded trampoline: only invoke fn if the timer was not stopped.
  var fire = () => {
    if (!handle) return;
    clearTimeout(handle);
    handle = false;

    fn();
  };

  // Arm the timer; no-op when already running. Returns `this` for chaining.
  this.start = function() {
    if (!this.isRunning()) {
      handle = setTimeout(fire, time);
    }
    return this;
  };

  // Disarm the timer and clear state. Returns `this` for chaining.
  this.stop = function() {
    clearTimeout(handle);
    handle = false;
    return this;
  };

  // True while a timer handle is held.
  this.isRunning = function() {
    return handle !== false;
  };
}
|
||||
|
||||
/**
 * Compute the difference between two topology descriptions as a list of
 * per-server transitions, in the original emission order: servers that
 * disappeared, then servers that appeared, then servers whose type changed.
 *
 * Improvement: replaces three O(n*m) nested scans with address-keyed Maps
 * (case-insensitive), and scopes the loop variables that previously leaked
 * across loops. Assumes addresses are unique within each description, as
 * topology descriptions guarantee — TODO confirm against callers.
 * @param {object} previous Previous description ({ servers: [...] }) or null
 * @param {object} current Current description ({ servers: [...] })
 * @return {object} { servers: [{ address, from, to }, ...] }
 */
function diff(previous, current) {
  // Difference document
  const result = { servers: [] };

  // Previous entry
  if (!previous) {
    previous = { servers: [] };
  }

  // Index both sides by lower-cased address for O(1) lookups
  const currentByAddress = new Map();
  for (const server of current.servers) {
    currentByAddress.set(server.address.toLowerCase(), server);
  }

  const previousByAddress = new Map();
  for (const server of previous.servers) {
    previousByAddress.set(server.address.toLowerCase(), server);
  }

  // Previous servers missing from the current description
  for (const server of previous.servers) {
    if (!currentByAddress.has(server.address.toLowerCase())) {
      result.servers.push({
        address: server.address,
        from: server.type,
        to: 'Unknown'
      });
    }
  }

  // Current servers that did not exist previously
  for (const server of current.servers) {
    if (!previousByAddress.has(server.address.toLowerCase())) {
      result.servers.push({
        address: server.address,
        from: 'Unknown',
        to: server.type
      });
    }
  }

  // Servers present on both sides whose type changed
  for (const prevServer of previous.servers) {
    const currServer = currentByAddress.get(prevServer.address.toLowerCase());
    if (currServer && prevServer.type !== currServer.type) {
      result.servers.push({
        address: prevServer.address,
        from: prevServer.type,
        to: currServer.type
      });
    }
  }

  // Return difference
  return result;
}
|
||||
|
||||
/**
 * Shared function to determine clusterTime for a given topology: adopt the
 * incoming $clusterTime when none is cached or when it is strictly newer.
 *
 * @param {object} topology Topology whose clusterTime may be advanced
 * @param {object} $clusterTime Incoming cluster time document (carries a
 *   .clusterTime with a .greaterThan comparator)
 */
function resolveClusterTime(topology, $clusterTime) {
  const current = topology.clusterTime;
  if (current == null || $clusterTime.clusterTime.greaterThan(current.clusterTime)) {
    topology.clusterTime = $clusterTime;
  }
}
|
||||
|
||||
// NOTE: this is a temporary move until the topologies can be more formally refactored
// to share code.
const SessionMixins = {
  /**
   * End the given sessions on the server, best-effort: the command result is
   * intentionally ignored, per the driver sessions spec.
   * @param {object|object[]} sessions One session or an array of sessions
   * @param {function} [callback] Invoked after the command completes
   */
  endSessions: function(sessions, callback) {
    const sessionList = Array.isArray(sessions) ? sessions : [sessions];

    // TODO:
    // When connected to a sharded cluster the endSessions command
    // can be sent to any mongos. When connected to a replica set the
    // endSessions command MUST be sent to the primary if the primary
    // is available, otherwise it MUST be sent to any available secondary.
    // Is it enough to use: ReadPreference.primaryPreferred ?
    this.command(
      'admin.$cmd',
      { endSessions: sessionList },
      { readPreference: ReadPreference.primaryPreferred },
      () => {
        // intentionally ignored, per spec
        if (typeof callback === 'function') callback();
      }
    );
  }
};
|
||||
|
||||
function topologyType(topology) {
|
||||
if (topology.description) {
|
||||
return topology.description.type;
|
||||
}
|
||||
|
||||
if (topology.type === 'mongos') {
|
||||
return TopologyType.Sharded;
|
||||
} else if (topology.type === 'replset') {
|
||||
return TopologyType.ReplicaSetWithPrimary;
|
||||
}
|
||||
|
||||
return TopologyType.Single;
|
||||
}
|
||||
|
||||
const RETRYABLE_WIRE_VERSION = 6;
|
||||
|
||||
/**
|
||||
* Determines whether the provided topology supports retryable writes
|
||||
*
|
||||
* @param {Mongos|Replset} topology
|
||||
*/
|
||||
const isRetryableWritesSupported = function(topology) {
|
||||
const maxWireVersion = topology.lastIsMaster().maxWireVersion;
|
||||
if (maxWireVersion < RETRYABLE_WIRE_VERSION) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!topology.logicalSessionTimeoutMinutes) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (topologyType(topology) === TopologyType.Single) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
};
|
||||
|
||||
const MMAPv1_RETRY_WRITES_ERROR_MESSAGE =
|
||||
'This MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string.';
|
||||
|
||||
function getMMAPError(err) {
|
||||
if (err.code !== MMAPv1_RETRY_WRITES_ERROR_CODE || !err.errmsg.includes('Transaction numbers')) {
|
||||
return err;
|
||||
}
|
||||
|
||||
// According to the retryable writes spec, we must replace the error message in this case.
|
||||
// We need to replace err.message so the thrown message is correct and we need to replace err.errmsg to meet the spec requirement.
|
||||
const newErr = new MongoError({
|
||||
message: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
|
||||
errmsg: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
|
||||
originalError: err
|
||||
});
|
||||
return newErr;
|
||||
}
|
||||
|
||||
// NOTE: only used for legacy topology types
|
||||
function legacyIsRetryableWriteError(err, topology) {
|
||||
if (!(err instanceof MongoError)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// if pre-4.4 server, then add error label if its a retryable write error
|
||||
if (
|
||||
isRetryableWritesSupported(topology) &&
|
||||
(err instanceof MongoNetworkError ||
|
||||
(maxWireVersion(topology) < 9 && isRetryableWriteError(err)))
|
||||
) {
|
||||
err.addErrorLabel('RetryableWriteError');
|
||||
}
|
||||
|
||||
return err.hasErrorLabel('RetryableWriteError');
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
SessionMixins,
|
||||
resolveClusterTime,
|
||||
inquireServerState,
|
||||
getTopologyType,
|
||||
emitServerDescriptionChanged,
|
||||
emitTopologyDescriptionChanged,
|
||||
cloneOptions,
|
||||
createCompressionInfo,
|
||||
clone,
|
||||
diff,
|
||||
Interval,
|
||||
Timeout,
|
||||
isRetryableWritesSupported,
|
||||
getMMAPError,
|
||||
topologyType,
|
||||
legacyIsRetryableWriteError
|
||||
};
|
||||
179
node_modules/mongodb/lib/core/transactions.js
generated
vendored
Normal file
179
node_modules/mongodb/lib/core/transactions.js
generated
vendored
Normal file
@@ -0,0 +1,179 @@
|
||||
'use strict';
|
||||
const MongoError = require('./error').MongoError;
|
||||
const ReadPreference = require('./topologies/read_preference');
|
||||
const ReadConcern = require('../read_concern');
|
||||
const WriteConcern = require('../write_concern');
|
||||
|
||||
let TxnState;
|
||||
let stateMachine;
|
||||
|
||||
(() => {
|
||||
const NO_TRANSACTION = 'NO_TRANSACTION';
|
||||
const STARTING_TRANSACTION = 'STARTING_TRANSACTION';
|
||||
const TRANSACTION_IN_PROGRESS = 'TRANSACTION_IN_PROGRESS';
|
||||
const TRANSACTION_COMMITTED = 'TRANSACTION_COMMITTED';
|
||||
const TRANSACTION_COMMITTED_EMPTY = 'TRANSACTION_COMMITTED_EMPTY';
|
||||
const TRANSACTION_ABORTED = 'TRANSACTION_ABORTED';
|
||||
|
||||
TxnState = {
|
||||
NO_TRANSACTION,
|
||||
STARTING_TRANSACTION,
|
||||
TRANSACTION_IN_PROGRESS,
|
||||
TRANSACTION_COMMITTED,
|
||||
TRANSACTION_COMMITTED_EMPTY,
|
||||
TRANSACTION_ABORTED
|
||||
};
|
||||
|
||||
stateMachine = {
|
||||
[NO_TRANSACTION]: [NO_TRANSACTION, STARTING_TRANSACTION],
|
||||
[STARTING_TRANSACTION]: [
|
||||
TRANSACTION_IN_PROGRESS,
|
||||
TRANSACTION_COMMITTED,
|
||||
TRANSACTION_COMMITTED_EMPTY,
|
||||
TRANSACTION_ABORTED
|
||||
],
|
||||
[TRANSACTION_IN_PROGRESS]: [
|
||||
TRANSACTION_IN_PROGRESS,
|
||||
TRANSACTION_COMMITTED,
|
||||
TRANSACTION_ABORTED
|
||||
],
|
||||
[TRANSACTION_COMMITTED]: [
|
||||
TRANSACTION_COMMITTED,
|
||||
TRANSACTION_COMMITTED_EMPTY,
|
||||
STARTING_TRANSACTION,
|
||||
NO_TRANSACTION
|
||||
],
|
||||
[TRANSACTION_ABORTED]: [STARTING_TRANSACTION, NO_TRANSACTION],
|
||||
[TRANSACTION_COMMITTED_EMPTY]: [TRANSACTION_COMMITTED_EMPTY, NO_TRANSACTION]
|
||||
};
|
||||
})();
|
||||
|
||||
/**
|
||||
* The MongoDB ReadConcern, which allows for control of the consistency and isolation properties
|
||||
* of the data read from replica sets and replica set shards.
|
||||
* @typedef {Object} ReadConcern
|
||||
* @property {'local'|'available'|'majority'|'linearizable'|'snapshot'} level The readConcern Level
|
||||
* @see https://docs.mongodb.com/manual/reference/read-concern/
|
||||
*/
|
||||
|
||||
/**
|
||||
* A MongoDB WriteConcern, which describes the level of acknowledgement
|
||||
* requested from MongoDB for write operations.
|
||||
* @typedef {Object} WriteConcern
|
||||
* @property {number|'majority'|string} [w=1] requests acknowledgement that the write operation has
|
||||
* propagated to a specified number of mongod hosts
|
||||
* @property {boolean} [j=false] requests acknowledgement from MongoDB that the write operation has
|
||||
* been written to the journal
|
||||
* @property {number} [wtimeout] a time limit, in milliseconds, for the write concern
|
||||
* @see https://docs.mongodb.com/manual/reference/write-concern/
|
||||
*/
|
||||
|
||||
/**
|
||||
* Configuration options for a transaction.
|
||||
* @typedef {Object} TransactionOptions
|
||||
* @property {ReadConcern} [readConcern] A default read concern for commands in this transaction
|
||||
* @property {WriteConcern} [writeConcern] A default writeConcern for commands in this transaction
|
||||
* @property {ReadPreference} [readPreference] A default read preference for commands in this transaction
|
||||
*/
|
||||
|
||||
/**
|
||||
* A class maintaining state related to a server transaction. Internal Only
|
||||
* @ignore
|
||||
*/
|
||||
class Transaction {
|
||||
/**
|
||||
* Create a transaction
|
||||
*
|
||||
* @ignore
|
||||
* @param {TransactionOptions} [options] Optional settings
|
||||
*/
|
||||
constructor(options) {
|
||||
options = options || {};
|
||||
|
||||
this.state = TxnState.NO_TRANSACTION;
|
||||
this.options = {};
|
||||
|
||||
const writeConcern = WriteConcern.fromOptions(options);
|
||||
if (writeConcern) {
|
||||
if (writeConcern.w <= 0) {
|
||||
throw new MongoError('Transactions do not support unacknowledged write concern');
|
||||
}
|
||||
|
||||
this.options.writeConcern = writeConcern;
|
||||
}
|
||||
|
||||
if (options.readConcern) {
|
||||
this.options.readConcern = ReadConcern.fromOptions(options);
|
||||
}
|
||||
|
||||
if (options.readPreference) {
|
||||
this.options.readPreference = ReadPreference.fromOptions(options);
|
||||
}
|
||||
|
||||
if (options.maxCommitTimeMS) {
|
||||
this.options.maxTimeMS = options.maxCommitTimeMS;
|
||||
}
|
||||
|
||||
// TODO: This isn't technically necessary
|
||||
this._pinnedServer = undefined;
|
||||
this._recoveryToken = undefined;
|
||||
}
|
||||
|
||||
get server() {
|
||||
return this._pinnedServer;
|
||||
}
|
||||
|
||||
get recoveryToken() {
|
||||
return this._recoveryToken;
|
||||
}
|
||||
|
||||
get isPinned() {
|
||||
return !!this.server;
|
||||
}
|
||||
|
||||
/**
|
||||
* @ignore
|
||||
* @return Whether this session is presently in a transaction
|
||||
*/
|
||||
get isActive() {
|
||||
return (
|
||||
[TxnState.STARTING_TRANSACTION, TxnState.TRANSACTION_IN_PROGRESS].indexOf(this.state) !== -1
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Transition the transaction in the state machine
|
||||
* @ignore
|
||||
* @param {TxnState} state The new state to transition to
|
||||
*/
|
||||
transition(nextState) {
|
||||
const nextStates = stateMachine[this.state];
|
||||
if (nextStates && nextStates.indexOf(nextState) !== -1) {
|
||||
this.state = nextState;
|
||||
if (this.state === TxnState.NO_TRANSACTION || this.state === TxnState.STARTING_TRANSACTION) {
|
||||
this.unpinServer();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
throw new MongoError(
|
||||
`Attempted illegal state transition from [${this.state}] to [${nextState}]`
|
||||
);
|
||||
}
|
||||
|
||||
pinServer(server) {
|
||||
if (this.isActive) {
|
||||
this._pinnedServer = server;
|
||||
}
|
||||
}
|
||||
|
||||
unpinServer() {
|
||||
this._pinnedServer = undefined;
|
||||
}
|
||||
}
|
||||
|
||||
function isTransactionCommand(command) {
|
||||
return !!(command.commitTransaction || command.abortTransaction);
|
||||
}
|
||||
|
||||
module.exports = { TxnState, Transaction, isTransactionCommand };
|
||||
723
node_modules/mongodb/lib/core/uri_parser.js
generated
vendored
Normal file
723
node_modules/mongodb/lib/core/uri_parser.js
generated
vendored
Normal file
@@ -0,0 +1,723 @@
|
||||
'use strict';
|
||||
const URL = require('url');
|
||||
const qs = require('querystring');
|
||||
const dns = require('dns');
|
||||
const MongoParseError = require('./error').MongoParseError;
|
||||
const ReadPreference = require('./topologies/read_preference');
|
||||
|
||||
/**
|
||||
* The following regular expression validates a connection string and breaks the
|
||||
* provide string into the following capture groups: [protocol, username, password, hosts]
|
||||
*/
|
||||
const HOSTS_RX = /(mongodb(?:\+srv|)):\/\/(?: (?:[^:]*) (?: : ([^@]*) )? @ )?([^/?]*)(?:\/|)(.*)/;
|
||||
|
||||
/**
|
||||
* Determines whether a provided address matches the provided parent domain in order
|
||||
* to avoid certain attack vectors.
|
||||
*
|
||||
* @param {String} srvAddress The address to check against a domain
|
||||
* @param {String} parentDomain The domain to check the provided address against
|
||||
* @return {Boolean} Whether the provided address matches the parent domain
|
||||
*/
|
||||
function matchesParentDomain(srvAddress, parentDomain) {
|
||||
const regex = /^.*?\./;
|
||||
const srv = `.${srvAddress.replace(regex, '')}`;
|
||||
const parent = `.${parentDomain.replace(regex, '')}`;
|
||||
return srv.endsWith(parent);
|
||||
}
|
||||
|
||||
/**
|
||||
* Lookup a `mongodb+srv` connection string, combine the parts and reparse it as a normal
|
||||
* connection string.
|
||||
*
|
||||
* @param {string} uri The connection string to parse
|
||||
* @param {object} options Optional user provided connection string options
|
||||
* @param {function} callback
|
||||
*/
|
||||
function parseSrvConnectionString(uri, options, callback) {
|
||||
const result = URL.parse(uri, true);
|
||||
|
||||
if (options.directConnection || options.directconnection) {
|
||||
return callback(new MongoParseError('directConnection not supported with SRV URI'));
|
||||
}
|
||||
|
||||
if (result.hostname.split('.').length < 3) {
|
||||
return callback(new MongoParseError('URI does not have hostname, domain name and tld'));
|
||||
}
|
||||
|
||||
result.domainLength = result.hostname.split('.').length;
|
||||
if (result.pathname && result.pathname.match(',')) {
|
||||
return callback(new MongoParseError('Invalid URI, cannot contain multiple hostnames'));
|
||||
}
|
||||
|
||||
if (result.port) {
|
||||
return callback(new MongoParseError(`Ports not accepted with '${PROTOCOL_MONGODB_SRV}' URIs`));
|
||||
}
|
||||
|
||||
// Resolve the SRV record and use the result as the list of hosts to connect to.
|
||||
const lookupAddress = result.host;
|
||||
dns.resolveSrv(`_mongodb._tcp.${lookupAddress}`, (err, addresses) => {
|
||||
if (err) return callback(err);
|
||||
|
||||
if (addresses.length === 0) {
|
||||
return callback(new MongoParseError('No addresses found at host'));
|
||||
}
|
||||
|
||||
for (let i = 0; i < addresses.length; i++) {
|
||||
if (!matchesParentDomain(addresses[i].name, result.hostname, result.domainLength)) {
|
||||
return callback(
|
||||
new MongoParseError('Server record does not share hostname with parent URI')
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the original URL to a non-SRV URL.
|
||||
result.protocol = 'mongodb';
|
||||
result.host = addresses.map(address => `${address.name}:${address.port}`).join(',');
|
||||
|
||||
// Default to SSL true if it's not specified.
|
||||
if (
|
||||
!('ssl' in options) &&
|
||||
(!result.search || !('ssl' in result.query) || result.query.ssl === null)
|
||||
) {
|
||||
result.query.ssl = true;
|
||||
}
|
||||
|
||||
// Resolve TXT record and add options from there if they exist.
|
||||
dns.resolveTxt(lookupAddress, (err, record) => {
|
||||
if (err) {
|
||||
if (err.code !== 'ENODATA') {
|
||||
return callback(err);
|
||||
}
|
||||
record = null;
|
||||
}
|
||||
|
||||
if (record) {
|
||||
if (record.length > 1) {
|
||||
return callback(new MongoParseError('Multiple text records not allowed'));
|
||||
}
|
||||
|
||||
record = qs.parse(record[0].join(''));
|
||||
if (Object.keys(record).some(key => key !== 'authSource' && key !== 'replicaSet')) {
|
||||
return callback(
|
||||
new MongoParseError('Text record must only set `authSource` or `replicaSet`')
|
||||
);
|
||||
}
|
||||
|
||||
result.query = Object.assign({}, record, result.query);
|
||||
}
|
||||
|
||||
// Set completed options back into the URL object.
|
||||
result.search = qs.stringify(result.query);
|
||||
|
||||
const finalString = URL.format(result);
|
||||
parseConnectionString(finalString, options, (err, ret) => {
|
||||
if (err) {
|
||||
callback(err);
|
||||
return;
|
||||
}
|
||||
|
||||
callback(null, Object.assign({}, ret, { srvHost: lookupAddress }));
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a query string item according to the connection string spec
|
||||
*
|
||||
* @param {string} key The key for the parsed value
|
||||
* @param {Array|String} value The value to parse
|
||||
* @return {Array|Object|String} The parsed value
|
||||
*/
|
||||
function parseQueryStringItemValue(key, value) {
|
||||
if (Array.isArray(value)) {
|
||||
// deduplicate and simplify arrays
|
||||
value = value.filter((v, idx) => value.indexOf(v) === idx);
|
||||
if (value.length === 1) value = value[0];
|
||||
} else if (value.indexOf(':') > 0) {
|
||||
value = value.split(',').reduce((result, pair) => {
|
||||
const parts = pair.split(':');
|
||||
result[parts[0]] = parseQueryStringItemValue(key, parts[1]);
|
||||
return result;
|
||||
}, {});
|
||||
} else if (value.indexOf(',') > 0) {
|
||||
value = value.split(',').map(v => {
|
||||
return parseQueryStringItemValue(key, v);
|
||||
});
|
||||
} else if (value.toLowerCase() === 'true' || value.toLowerCase() === 'false') {
|
||||
value = value.toLowerCase() === 'true';
|
||||
} else if (!Number.isNaN(value) && !STRING_OPTIONS.has(key)) {
|
||||
const numericValue = parseFloat(value);
|
||||
if (!Number.isNaN(numericValue)) {
|
||||
value = parseFloat(value);
|
||||
}
|
||||
}
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
// Options that are known boolean types
|
||||
const BOOLEAN_OPTIONS = new Set([
|
||||
'slaveok',
|
||||
'slave_ok',
|
||||
'sslvalidate',
|
||||
'fsync',
|
||||
'safe',
|
||||
'retrywrites',
|
||||
'j'
|
||||
]);
|
||||
|
||||
// Known string options, only used to bypass Number coercion in `parseQueryStringItemValue`
|
||||
const STRING_OPTIONS = new Set(['authsource', 'replicaset']);
|
||||
|
||||
// Supported text representations of auth mechanisms
|
||||
// NOTE: this list exists in native already, if it is merged here we should deduplicate
|
||||
const AUTH_MECHANISMS = new Set([
|
||||
'GSSAPI',
|
||||
'MONGODB-AWS',
|
||||
'MONGODB-X509',
|
||||
'MONGODB-CR',
|
||||
'DEFAULT',
|
||||
'SCRAM-SHA-1',
|
||||
'SCRAM-SHA-256',
|
||||
'PLAIN'
|
||||
]);
|
||||
|
||||
// Lookup table used to translate normalized (lower-cased) forms of connection string
|
||||
// options to their expected camelCase version
|
||||
const CASE_TRANSLATION = {
|
||||
replicaset: 'replicaSet',
|
||||
connecttimeoutms: 'connectTimeoutMS',
|
||||
sockettimeoutms: 'socketTimeoutMS',
|
||||
maxpoolsize: 'maxPoolSize',
|
||||
minpoolsize: 'minPoolSize',
|
||||
maxidletimems: 'maxIdleTimeMS',
|
||||
waitqueuemultiple: 'waitQueueMultiple',
|
||||
waitqueuetimeoutms: 'waitQueueTimeoutMS',
|
||||
wtimeoutms: 'wtimeoutMS',
|
||||
readconcern: 'readConcern',
|
||||
readconcernlevel: 'readConcernLevel',
|
||||
readpreference: 'readPreference',
|
||||
maxstalenessseconds: 'maxStalenessSeconds',
|
||||
readpreferencetags: 'readPreferenceTags',
|
||||
authsource: 'authSource',
|
||||
authmechanism: 'authMechanism',
|
||||
authmechanismproperties: 'authMechanismProperties',
|
||||
gssapiservicename: 'gssapiServiceName',
|
||||
localthresholdms: 'localThresholdMS',
|
||||
serverselectiontimeoutms: 'serverSelectionTimeoutMS',
|
||||
serverselectiontryonce: 'serverSelectionTryOnce',
|
||||
heartbeatfrequencyms: 'heartbeatFrequencyMS',
|
||||
retrywrites: 'retryWrites',
|
||||
uuidrepresentation: 'uuidRepresentation',
|
||||
zlibcompressionlevel: 'zlibCompressionLevel',
|
||||
tlsallowinvalidcertificates: 'tlsAllowInvalidCertificates',
|
||||
tlsallowinvalidhostnames: 'tlsAllowInvalidHostnames',
|
||||
tlsinsecure: 'tlsInsecure',
|
||||
tlscafile: 'tlsCAFile',
|
||||
tlscertificatekeyfile: 'tlsCertificateKeyFile',
|
||||
tlscertificatekeyfilepassword: 'tlsCertificateKeyFilePassword',
|
||||
wtimeout: 'wTimeoutMS',
|
||||
j: 'journal',
|
||||
directconnection: 'directConnection'
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets the value for `key`, allowing for any required translation
|
||||
*
|
||||
* @param {object} obj The object to set the key on
|
||||
* @param {string} key The key to set the value for
|
||||
* @param {*} value The value to set
|
||||
* @param {object} options The options used for option parsing
|
||||
*/
|
||||
function applyConnectionStringOption(obj, key, value, options) {
|
||||
// simple key translation
|
||||
if (key === 'journal') {
|
||||
key = 'j';
|
||||
} else if (key === 'wtimeoutms') {
|
||||
key = 'wtimeout';
|
||||
}
|
||||
|
||||
// more complicated translation
|
||||
if (BOOLEAN_OPTIONS.has(key)) {
|
||||
value = value === 'true' || value === true;
|
||||
} else if (key === 'appname') {
|
||||
value = decodeURIComponent(value);
|
||||
} else if (key === 'readconcernlevel') {
|
||||
obj['readConcernLevel'] = value;
|
||||
key = 'readconcern';
|
||||
value = { level: value };
|
||||
}
|
||||
|
||||
// simple validation
|
||||
if (key === 'compressors') {
|
||||
value = Array.isArray(value) ? value : [value];
|
||||
|
||||
if (!value.every(c => c === 'snappy' || c === 'zlib')) {
|
||||
throw new MongoParseError(
|
||||
'Value for `compressors` must be at least one of: `snappy`, `zlib`'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (key === 'authmechanism' && !AUTH_MECHANISMS.has(value)) {
|
||||
throw new MongoParseError(
|
||||
`Value for authMechanism must be one of: ${Array.from(AUTH_MECHANISMS).join(
|
||||
', '
|
||||
)}, found: ${value}`
|
||||
);
|
||||
}
|
||||
|
||||
if (key === 'readpreference' && !ReadPreference.isValid(value)) {
|
||||
throw new MongoParseError(
|
||||
'Value for `readPreference` must be one of: `primary`, `primaryPreferred`, `secondary`, `secondaryPreferred`, `nearest`'
|
||||
);
|
||||
}
|
||||
|
||||
if (key === 'zlibcompressionlevel' && (value < -1 || value > 9)) {
|
||||
throw new MongoParseError('zlibCompressionLevel must be an integer between -1 and 9');
|
||||
}
|
||||
|
||||
// special cases
|
||||
if (key === 'compressors' || key === 'zlibcompressionlevel') {
|
||||
obj.compression = obj.compression || {};
|
||||
obj = obj.compression;
|
||||
}
|
||||
|
||||
if (key === 'authmechanismproperties') {
|
||||
if (typeof value.SERVICE_NAME === 'string') obj.gssapiServiceName = value.SERVICE_NAME;
|
||||
if (typeof value.SERVICE_REALM === 'string') obj.gssapiServiceRealm = value.SERVICE_REALM;
|
||||
if (typeof value.CANONICALIZE_HOST_NAME !== 'undefined') {
|
||||
obj.gssapiCanonicalizeHostName = value.CANONICALIZE_HOST_NAME;
|
||||
}
|
||||
}
|
||||
|
||||
if (key === 'readpreferencetags') {
|
||||
value = Array.isArray(value) ? splitArrayOfMultipleReadPreferenceTags(value) : [value];
|
||||
}
|
||||
|
||||
// set the actual value
|
||||
if (options.caseTranslate && CASE_TRANSLATION[key]) {
|
||||
obj[CASE_TRANSLATION[key]] = value;
|
||||
return;
|
||||
}
|
||||
|
||||
obj[key] = value;
|
||||
}
|
||||
|
||||
const USERNAME_REQUIRED_MECHANISMS = new Set([
|
||||
'GSSAPI',
|
||||
'MONGODB-CR',
|
||||
'PLAIN',
|
||||
'SCRAM-SHA-1',
|
||||
'SCRAM-SHA-256'
|
||||
]);
|
||||
|
||||
function splitArrayOfMultipleReadPreferenceTags(value) {
|
||||
const parsedTags = [];
|
||||
|
||||
for (let i = 0; i < value.length; i++) {
|
||||
parsedTags[i] = {};
|
||||
value[i].split(',').forEach(individualTag => {
|
||||
const splitTag = individualTag.split(':');
|
||||
parsedTags[i][splitTag[0]] = splitTag[1];
|
||||
});
|
||||
}
|
||||
|
||||
return parsedTags;
|
||||
}
|
||||
|
||||
/**
|
||||
* Modifies the parsed connection string object taking into account expectations we
|
||||
* have for authentication-related options.
|
||||
*
|
||||
* @param {object} parsed The parsed connection string result
|
||||
* @return The parsed connection string result possibly modified for auth expectations
|
||||
*/
|
||||
function applyAuthExpectations(parsed) {
|
||||
if (parsed.options == null) {
|
||||
return;
|
||||
}
|
||||
|
||||
const options = parsed.options;
|
||||
const authSource = options.authsource || options.authSource;
|
||||
if (authSource != null) {
|
||||
parsed.auth = Object.assign({}, parsed.auth, { db: authSource });
|
||||
}
|
||||
|
||||
const authMechanism = options.authmechanism || options.authMechanism;
|
||||
if (authMechanism != null) {
|
||||
if (
|
||||
USERNAME_REQUIRED_MECHANISMS.has(authMechanism) &&
|
||||
(!parsed.auth || parsed.auth.username == null)
|
||||
) {
|
||||
throw new MongoParseError(`Username required for mechanism \`${authMechanism}\``);
|
||||
}
|
||||
|
||||
if (authMechanism === 'GSSAPI') {
|
||||
if (authSource != null && authSource !== '$external') {
|
||||
throw new MongoParseError(
|
||||
`Invalid source \`${authSource}\` for mechanism \`${authMechanism}\` specified.`
|
||||
);
|
||||
}
|
||||
|
||||
parsed.auth = Object.assign({}, parsed.auth, { db: '$external' });
|
||||
}
|
||||
|
||||
if (authMechanism === 'MONGODB-AWS') {
|
||||
if (authSource != null && authSource !== '$external') {
|
||||
throw new MongoParseError(
|
||||
`Invalid source \`${authSource}\` for mechanism \`${authMechanism}\` specified.`
|
||||
);
|
||||
}
|
||||
|
||||
parsed.auth = Object.assign({}, parsed.auth, { db: '$external' });
|
||||
}
|
||||
|
||||
if (authMechanism === 'MONGODB-X509') {
|
||||
if (parsed.auth && parsed.auth.password != null) {
|
||||
throw new MongoParseError(`Password not allowed for mechanism \`${authMechanism}\``);
|
||||
}
|
||||
|
||||
if (authSource != null && authSource !== '$external') {
|
||||
throw new MongoParseError(
|
||||
`Invalid source \`${authSource}\` for mechanism \`${authMechanism}\` specified.`
|
||||
);
|
||||
}
|
||||
|
||||
parsed.auth = Object.assign({}, parsed.auth, { db: '$external' });
|
||||
}
|
||||
|
||||
if (authMechanism === 'PLAIN') {
|
||||
if (parsed.auth && parsed.auth.db == null) {
|
||||
parsed.auth = Object.assign({}, parsed.auth, { db: '$external' });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// default to `admin` if nothing else was resolved
|
||||
if (parsed.auth && parsed.auth.db == null) {
|
||||
parsed.auth = Object.assign({}, parsed.auth, { db: 'admin' });
|
||||
}
|
||||
|
||||
return parsed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a query string according the connection string spec.
|
||||
*
|
||||
* @param {String} query The query string to parse
|
||||
* @param {object} [options] The options used for options parsing
|
||||
* @return {Object|Error} The parsed query string as an object, or an error if one was encountered
|
||||
*/
|
||||
function parseQueryString(query, options) {
|
||||
const result = {};
|
||||
let parsedQueryString = qs.parse(query);
|
||||
|
||||
checkTLSOptions(parsedQueryString);
|
||||
|
||||
for (const key in parsedQueryString) {
|
||||
const value = parsedQueryString[key];
|
||||
if (value === '' || value == null) {
|
||||
throw new MongoParseError('Incomplete key value pair for option');
|
||||
}
|
||||
|
||||
const normalizedKey = key.toLowerCase();
|
||||
const parsedValue = parseQueryStringItemValue(normalizedKey, value);
|
||||
applyConnectionStringOption(result, normalizedKey, parsedValue, options);
|
||||
}
|
||||
|
||||
// special cases for known deprecated options
|
||||
if (result.wtimeout && result.wtimeoutms) {
|
||||
delete result.wtimeout;
|
||||
console.warn('Unsupported option `wtimeout` specified');
|
||||
}
|
||||
|
||||
return Object.keys(result).length ? result : null;
|
||||
}
|
||||
|
||||
/// Adds support for modern `tls` variants of out `ssl` options
|
||||
function translateTLSOptions(queryString) {
|
||||
if (queryString.tls) {
|
||||
queryString.ssl = queryString.tls;
|
||||
}
|
||||
|
||||
if (queryString.tlsInsecure) {
|
||||
queryString.checkServerIdentity = false;
|
||||
queryString.sslValidate = false;
|
||||
} else {
|
||||
Object.assign(queryString, {
|
||||
checkServerIdentity: queryString.tlsAllowInvalidHostnames ? false : true,
|
||||
sslValidate: queryString.tlsAllowInvalidCertificates ? false : true
|
||||
});
|
||||
}
|
||||
|
||||
if (queryString.tlsCAFile) {
|
||||
queryString.ssl = true;
|
||||
queryString.sslCA = queryString.tlsCAFile;
|
||||
}
|
||||
|
||||
if (queryString.tlsCertificateKeyFile) {
|
||||
queryString.ssl = true;
|
||||
if (queryString.tlsCertificateFile) {
|
||||
queryString.sslCert = queryString.tlsCertificateFile;
|
||||
queryString.sslKey = queryString.tlsCertificateKeyFile;
|
||||
} else {
|
||||
queryString.sslKey = queryString.tlsCertificateKeyFile;
|
||||
queryString.sslCert = queryString.tlsCertificateKeyFile;
|
||||
}
|
||||
}
|
||||
|
||||
if (queryString.tlsCertificateKeyFilePassword) {
|
||||
queryString.ssl = true;
|
||||
queryString.sslPass = queryString.tlsCertificateKeyFilePassword;
|
||||
}
|
||||
|
||||
return queryString;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks a query string for invalid tls options according to the URI options spec.
|
||||
*
|
||||
* @param {string} queryString The query string to check
|
||||
* @throws {MongoParseError}
|
||||
*/
|
||||
function checkTLSOptions(queryString) {
|
||||
const queryStringKeys = Object.keys(queryString);
|
||||
if (
|
||||
queryStringKeys.indexOf('tlsInsecure') !== -1 &&
|
||||
(queryStringKeys.indexOf('tlsAllowInvalidCertificates') !== -1 ||
|
||||
queryStringKeys.indexOf('tlsAllowInvalidHostnames') !== -1)
|
||||
) {
|
||||
throw new MongoParseError(
|
||||
'The `tlsInsecure` option cannot be used with `tlsAllowInvalidCertificates` or `tlsAllowInvalidHostnames`.'
|
||||
);
|
||||
}
|
||||
|
||||
const tlsValue = assertTlsOptionsAreEqual('tls', queryString, queryStringKeys);
|
||||
const sslValue = assertTlsOptionsAreEqual('ssl', queryString, queryStringKeys);
|
||||
|
||||
if (tlsValue != null && sslValue != null) {
|
||||
if (tlsValue !== sslValue) {
|
||||
throw new MongoParseError('All values of `tls` and `ssl` must be the same.');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks a query string to ensure all tls/ssl options are the same.
|
||||
*
|
||||
* @param {string} key The key (tls or ssl) to check
|
||||
* @param {string} queryString The query string to check
|
||||
* @throws {MongoParseError}
|
||||
* @return The value of the tls/ssl option
|
||||
*/
|
||||
function assertTlsOptionsAreEqual(optionName, queryString, queryStringKeys) {
|
||||
const queryStringHasTLSOption = queryStringKeys.indexOf(optionName) !== -1;
|
||||
|
||||
let optionValue;
|
||||
if (Array.isArray(queryString[optionName])) {
|
||||
optionValue = queryString[optionName][0];
|
||||
} else {
|
||||
optionValue = queryString[optionName];
|
||||
}
|
||||
|
||||
if (queryStringHasTLSOption) {
|
||||
if (Array.isArray(queryString[optionName])) {
|
||||
const firstValue = queryString[optionName][0];
|
||||
queryString[optionName].forEach(tlsValue => {
|
||||
if (tlsValue !== firstValue) {
|
||||
throw new MongoParseError(`All values of ${optionName} must be the same.`);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return optionValue;
|
||||
}
|
||||
|
||||
const PROTOCOL_MONGODB = 'mongodb';
|
||||
const PROTOCOL_MONGODB_SRV = 'mongodb+srv';
|
||||
const SUPPORTED_PROTOCOLS = [PROTOCOL_MONGODB, PROTOCOL_MONGODB_SRV];
|
||||
|
||||
/**
|
||||
* Parses a MongoDB connection string
|
||||
*
|
||||
* @param {*} uri the MongoDB connection string to parse
|
||||
* @param {object} [options] Optional settings.
|
||||
* @param {boolean} [options.caseTranslate] Whether the parser should translate options back into camelCase after normalization
|
||||
* @param {parseCallback} callback
|
||||
*/
|
||||
function parseConnectionString(uri, options, callback) {
|
||||
if (typeof options === 'function') (callback = options), (options = {});
|
||||
options = Object.assign({}, { caseTranslate: true }, options);
|
||||
|
||||
// Check for bad uris before we parse
|
||||
try {
|
||||
URL.parse(uri);
|
||||
} catch (e) {
|
||||
return callback(new MongoParseError('URI malformed, cannot be parsed'));
|
||||
}
|
||||
|
||||
const cap = uri.match(HOSTS_RX);
|
||||
if (!cap) {
|
||||
return callback(new MongoParseError('Invalid connection string'));
|
||||
}
|
||||
|
||||
const protocol = cap[1];
|
||||
if (SUPPORTED_PROTOCOLS.indexOf(protocol) === -1) {
|
||||
return callback(new MongoParseError('Invalid protocol provided'));
|
||||
}
|
||||
|
||||
const dbAndQuery = cap[4].split('?');
|
||||
const db = dbAndQuery.length > 0 ? dbAndQuery[0] : null;
|
||||
const query = dbAndQuery.length > 1 ? dbAndQuery[1] : null;
|
||||
|
||||
let parsedOptions;
|
||||
try {
|
||||
parsedOptions = parseQueryString(query, options);
|
||||
} catch (parseError) {
|
||||
return callback(parseError);
|
||||
}
|
||||
|
||||
parsedOptions = Object.assign({}, parsedOptions, options);
|
||||
|
||||
if (protocol === PROTOCOL_MONGODB_SRV) {
|
||||
return parseSrvConnectionString(uri, parsedOptions, callback);
|
||||
}
|
||||
|
||||
const auth = { username: null, password: null, db: db && db !== '' ? qs.unescape(db) : null };
|
||||
if (parsedOptions.auth) {
|
||||
// maintain support for legacy options passed into `MongoClient`
|
||||
if (parsedOptions.auth.username) auth.username = parsedOptions.auth.username;
|
||||
if (parsedOptions.auth.user) auth.username = parsedOptions.auth.user;
|
||||
if (parsedOptions.auth.password) auth.password = parsedOptions.auth.password;
|
||||
} else {
|
||||
if (parsedOptions.username) auth.username = parsedOptions.username;
|
||||
if (parsedOptions.user) auth.username = parsedOptions.user;
|
||||
if (parsedOptions.password) auth.password = parsedOptions.password;
|
||||
}
|
||||
|
||||
if (cap[4].split('?')[0].indexOf('@') !== -1) {
|
||||
return callback(new MongoParseError('Unescaped slash in userinfo section'));
|
||||
}
|
||||
|
||||
const authorityParts = cap[3].split('@');
|
||||
if (authorityParts.length > 2) {
|
||||
return callback(new MongoParseError('Unescaped at-sign in authority section'));
|
||||
}
|
||||
|
||||
if (authorityParts[0] == null || authorityParts[0] === '') {
|
||||
return callback(new MongoParseError('No username provided in authority section'));
|
||||
}
|
||||
|
||||
if (authorityParts.length > 1) {
|
||||
const authParts = authorityParts.shift().split(':');
|
||||
if (authParts.length > 2) {
|
||||
return callback(new MongoParseError('Unescaped colon in authority section'));
|
||||
}
|
||||
|
||||
if (authParts[0] === '') {
|
||||
return callback(new MongoParseError('Invalid empty username provided'));
|
||||
}
|
||||
|
||||
if (!auth.username) auth.username = qs.unescape(authParts[0]);
|
||||
if (!auth.password) auth.password = authParts[1] ? qs.unescape(authParts[1]) : null;
|
||||
}
|
||||
|
||||
let hostParsingError = null;
|
||||
const hosts = authorityParts
|
||||
.shift()
|
||||
.split(',')
|
||||
.map(host => {
|
||||
let parsedHost = URL.parse(`mongodb://${host}`);
|
||||
if (parsedHost.path === '/:') {
|
||||
hostParsingError = new MongoParseError('Double colon in host identifier');
|
||||
return null;
|
||||
}
|
||||
|
||||
// heuristically determine if we're working with a domain socket
|
||||
if (host.match(/\.sock/)) {
|
||||
parsedHost.hostname = qs.unescape(host);
|
||||
parsedHost.port = null;
|
||||
}
|
||||
|
||||
if (Number.isNaN(parsedHost.port)) {
|
||||
hostParsingError = new MongoParseError('Invalid port (non-numeric string)');
|
||||
return;
|
||||
}
|
||||
|
||||
const result = {
|
||||
host: parsedHost.hostname,
|
||||
port: parsedHost.port ? parseInt(parsedHost.port) : 27017
|
||||
};
|
||||
|
||||
if (result.port === 0) {
|
||||
hostParsingError = new MongoParseError('Invalid port (zero) with hostname');
|
||||
return;
|
||||
}
|
||||
|
||||
if (result.port > 65535) {
|
||||
hostParsingError = new MongoParseError('Invalid port (larger than 65535) with hostname');
|
||||
return;
|
||||
}
|
||||
|
||||
if (result.port < 0) {
|
||||
hostParsingError = new MongoParseError('Invalid port (negative number)');
|
||||
return;
|
||||
}
|
||||
|
||||
return result;
|
||||
})
|
||||
.filter(host => !!host);
|
||||
|
||||
if (hostParsingError) {
|
||||
return callback(hostParsingError);
|
||||
}
|
||||
|
||||
if (hosts.length === 0 || hosts[0].host === '' || hosts[0].host === null) {
|
||||
return callback(new MongoParseError('No hostname or hostnames provided in connection string'));
|
||||
}
|
||||
|
||||
const directConnection = !!parsedOptions.directConnection;
|
||||
if (directConnection && hosts.length !== 1) {
|
||||
// If the option is set to true, the driver MUST validate that there is exactly one host given
|
||||
// in the host list in the URI, and fail client creation otherwise.
|
||||
return callback(new MongoParseError('directConnection option requires exactly one host'));
|
||||
}
|
||||
|
||||
// NOTE: this behavior will go away in v4.0, we will always auto discover there
|
||||
if (
|
||||
parsedOptions.directConnection == null &&
|
||||
hosts.length === 1 &&
|
||||
parsedOptions.replicaSet == null
|
||||
) {
|
||||
parsedOptions.directConnection = true;
|
||||
}
|
||||
|
||||
const result = {
|
||||
hosts: hosts,
|
||||
auth: auth.db || auth.username ? auth : null,
|
||||
options: Object.keys(parsedOptions).length ? parsedOptions : null
|
||||
};
|
||||
|
||||
if (result.auth && result.auth.db) {
|
||||
result.defaultDatabase = result.auth.db;
|
||||
} else {
|
||||
result.defaultDatabase = 'test';
|
||||
}
|
||||
|
||||
// support modern `tls` variants to SSL options
|
||||
result.options = translateTLSOptions(result.options);
|
||||
|
||||
try {
|
||||
applyAuthExpectations(result);
|
||||
} catch (authError) {
|
||||
return callback(authError);
|
||||
}
|
||||
|
||||
callback(null, result);
|
||||
}
|
||||
|
||||
module.exports = parseConnectionString;
|
||||
297
node_modules/mongodb/lib/core/utils.js
generated
vendored
Normal file
297
node_modules/mongodb/lib/core/utils.js
generated
vendored
Normal file
@@ -0,0 +1,297 @@
|
||||
'use strict';
|
||||
const os = require('os');
|
||||
const crypto = require('crypto');
|
||||
const requireOptional = require('require_optional');
|
||||
|
||||
/**
|
||||
* Generate a UUIDv4
|
||||
*/
|
||||
/**
 * Generate a UUIDv4 as a raw 16-byte Buffer (not hex-formatted).
 */
const uuidV4 = () => {
  const bytes = crypto.randomBytes(16);
  // Stamp the RFC 4122 version (4) and variant (10xx) bits.
  bytes[6] = (bytes[6] & 0x0f) | 0x40;
  bytes[8] = (bytes[8] & 0x3f) | 0x80;
  return bytes;
};
|
||||
|
||||
/**
|
||||
* Relays events for a given listener and emitter
|
||||
*
|
||||
* @param {EventEmitter} listener the EventEmitter to listen to the events from
|
||||
* @param {EventEmitter} emitter the EventEmitter to relay the events to
|
||||
*/
|
||||
/**
 * Relays events for a given listener and emitter.
 *
 * @param {EventEmitter} listener the EventEmitter to listen to the events from
 * @param {EventEmitter} emitter the EventEmitter to relay the events to
 * @param {string[]} events names of the events to forward
 */
function relayEvents(listener, emitter, events) {
  for (const eventName of events) {
    listener.on(eventName, (event) => emitter.emit(eventName, event));
  }
}
|
||||
|
||||
/**
 * Optionally load the `kerberos` native module, converting a missing-module
 * failure into an actionable error message; any other load error is rethrown.
 */
function retrieveKerberos() {
  try {
    return requireOptional('kerberos');
  } catch (err) {
    if (err.code === 'MODULE_NOT_FOUND') {
      throw new Error('The `kerberos` module was not found. Please install it and try again.');
    }
    throw err;
  }
}
|
||||
|
||||
// Throw an error if an attempt to use EJSON is made when it is not installed
|
||||
// Throw an error if an attempt to use EJSON is made when it is not installed
// (used as a stand-in for every method on the EJSON stub below).
const noEJSONError = function() {
  throw new Error('The `mongodb-extjson` module was not found. Please install it and try again.');
};
|
||||
|
||||
// Facilitate loading EJSON optionally
|
||||
/**
 * Optionally load `mongodb-extjson`. When the module is unavailable, return a
 * stub whose every entry point throws an informative installation error.
 */
function retrieveEJSON() {
  let EJSON = null;
  try {
    EJSON = requireOptional('mongodb-extjson');
  } catch (error) {} // eslint-disable-line

  if (EJSON) {
    return EJSON;
  }

  // Fallback stub: same surface, but every member raises on use.
  return {
    parse: noEJSONError,
    deserialize: noEJSONError,
    serialize: noEJSONError,
    stringify: noEJSONError,
    setBSONModule: noEJSONError,
    BSON: noEJSONError
  };
}
|
||||
|
||||
/**
|
||||
* A helper function for determining `maxWireVersion` between legacy and new topology
|
||||
* instances
|
||||
*
|
||||
* @private
|
||||
* @param {(Topology|Server)} topologyOrServer
|
||||
*/
|
||||
/**
 * A helper function for determining `maxWireVersion` between legacy and new
 * topology instances.
 *
 * Checks, in order: a raw `ismaster` response, a `lastIsMaster()` accessor,
 * then a unified-topology `description`; returns 0 when none is available.
 *
 * @private
 * @param {(Topology|Server)} topologyOrServer
 */
function maxWireVersion(topologyOrServer) {
  if (!topologyOrServer) {
    return 0;
  }

  if (topologyOrServer.ismaster) {
    return topologyOrServer.ismaster.maxWireVersion;
  }

  if (typeof topologyOrServer.lastIsMaster === 'function') {
    const response = topologyOrServer.lastIsMaster();
    if (response) {
      return response.maxWireVersion;
    }
  }

  if (topologyOrServer.description) {
    return topologyOrServer.description.maxWireVersion;
  }

  return 0;
}
|
||||
|
||||
/*
|
||||
* Checks that collation is supported by server.
|
||||
*
|
||||
* @param {Server} [server] to check against
|
||||
* @param {object} [cmd] object where collation may be specified
|
||||
* @param {function} [callback] callback function
|
||||
* @return true if server does not support collation
|
||||
*/
|
||||
/*
 * Checks that collation is supported by server.
 *
 * @param {Server} [server] to check against
 * @param {object} [cmd] object where collation may be specified
 * @return truthy if a collation is requested but the server does not support it
 */
function collationNotSupported(server, cmd) {
  const wantsCollation = cmd && cmd.collation;
  // Collation requires wire version 5+ (MongoDB 3.4).
  return wantsCollation && maxWireVersion(server) < 5;
}
|
||||
|
||||
/**
|
||||
* Checks if a given value is a Promise
|
||||
*
|
||||
* @param {*} maybePromise
|
||||
* @return true if the provided value is a Promise
|
||||
*/
|
||||
/**
 * Checks if a given value is a Promise (thenable).
 *
 * @param {*} maybePromise
 * @return truthy if the provided value is a Promise
 */
function isPromiseLike(maybePromise) {
  // Falsy inputs are passed through unchanged, matching `a && b` semantics.
  if (!maybePromise) {
    return maybePromise;
  }
  return typeof maybePromise.then === 'function';
}
|
||||
|
||||
/**
|
||||
* Applies the function `eachFn` to each item in `arr`, in parallel.
|
||||
*
|
||||
* @param {array} arr an array of items to asynchronusly iterate over
|
||||
* @param {function} eachFn A function to call on each item of the array. The callback signature is `(item, callback)`, where the callback indicates iteration is complete.
|
||||
* @param {function} callback The callback called after every item has been iterated
|
||||
*/
|
||||
/**
 * Applies the function `eachFn` to each item in `arr`, in parallel.
 *
 * @param {array} arr an array of items to asynchronusly iterate over
 * @param {function} eachFn A function to call on each item of the array. The callback signature is `(item, callback)`, where the callback indicates iteration is complete.
 * @param {function} callback The callback called after every item has been iterated
 */
function eachAsync(arr, eachFn, callback) {
  arr = arr || [];

  let idx = 0;
  let awaiting = 0;
  // Dispatch every item immediately; `awaiting` counts outstanding callbacks.
  for (idx = 0; idx < arr.length; ++idx) {
    awaiting++;
    eachFn(arr[idx], eachCallback);
  }

  // Covers the empty-array case and fully synchronous `eachFn`s whose
  // callbacks have already completed by the time the loop finishes.
  if (awaiting === 0) {
    callback();
    return;
  }

  function eachCallback(err) {
    awaiting--;
    if (err) {
      // NOTE(review): if `eachFn` fails synchronously, the `awaiting === 0`
      // branch above still runs after the loop and invokes `callback` a
      // second time — confirm callers tolerate a double invocation.
      callback(err);
      return;
    }

    // Complete only once the loop has dispatched all items and none remain
    // outstanding.
    if (idx === arr.length && awaiting <= 0) {
      callback();
    }
  }
}
|
||||
|
||||
/**
 * Applies `eachFn` to each item in `arr` one at a time, in order, then calls
 * `callback`. Iteration stops at the first error.
 *
 * @param {array} arr items to iterate (treated as empty when falsy)
 * @param {function} eachFn called as `eachFn(item, done)` for each item in turn
 * @param {function} callback called with the first error, or no arguments on success
 */
function eachAsyncSeries(arr, eachFn, callback) {
  arr = arr || [];

  let idx = 0;
  let awaiting = arr.length;
  // Nothing to iterate — report success immediately.
  if (awaiting === 0) {
    callback();
    return;
  }

  function eachCallback(err) {
    idx++;
    awaiting--;
    if (err) {
      callback(err);
      return;
    }

    // All items processed — report success.
    if (idx === arr.length && awaiting <= 0) {
      callback();
      return;
    }

    // Chain to the next item only after the previous one completed.
    eachFn(arr[idx], eachCallback);
  }

  eachFn(arr[idx], eachCallback);
}
|
||||
|
||||
/**
 * Reports whether `topology` is a unified-topology instance, identified by
 * the presence of a `description` property.
 */
function isUnifiedTopology(topology) {
  const description = topology.description;
  return description !== null && description !== undefined;
}
|
||||
|
||||
/**
 * Shallow, strict element-wise equality of two arrays.
 * Returns false when either argument is not an array.
 */
function arrayStrictEqual(arr, arr2) {
  if (!Array.isArray(arr) || !Array.isArray(arr2)) {
    return false;
  }

  if (arr.length !== arr2.length) {
    return false;
  }

  for (let i = 0; i < arr.length; i++) {
    if (arr[i] !== arr2[i]) {
      return false;
    }
  }

  return true;
}
|
||||
|
||||
/**
 * Strict equality of two tag maps: same key count and identical value for
 * every key of `tags`.
 */
function tagsStrictEqual(tags, tags2) {
  const keys = Object.keys(tags);
  if (keys.length !== Object.keys(tags2).length) {
    return false;
  }

  for (const key of keys) {
    if (tags2[key] !== tags[key]) {
      return false;
    }
  }

  return true;
}
|
||||
|
||||
/**
 * Compares two errors for equivalence: identical reference, or matching
 * constructor name and message. Two nullish values are equal only when
 * strictly identical (e.g. null === null).
 */
function errorStrictEqual(lhs, rhs) {
  if (lhs === rhs) {
    return true;
  }

  // Exactly one side nullish → not equal. (Both-nullish-but-distinct, e.g.
  // null vs undefined, intentionally falls through, matching the original.)
  const onlyOneIsNull = (lhs == null) !== (rhs == null);
  if (onlyOneIsNull) {
    return false;
  }

  return lhs.constructor.name === rhs.constructor.name && lhs.message === rhs.message;
}
|
||||
|
||||
/**
 * Builds a state-transition function from a table mapping each state to its
 * legal successor states. The returned function validates the transition,
 * emits `stateChanged` on the target, then updates `target.s.state`.
 * States absent from the table allow any transition.
 */
function makeStateMachine(stateTable) {
  return function stateTransition(target, newState) {
    const legalStates = stateTable[target.s.state];
    const isLegal = !legalStates || legalStates.indexOf(newState) >= 0;
    if (!isLegal) {
      throw new TypeError(
        `illegal state transition from [${target.s.state}] => [${newState}], allowed: [${legalStates}]`
      );
    }

    target.emit('stateChanged', target.s.state, newState);
    target.s.state = newState;
  };
}
|
||||
|
||||
/**
 * Builds the client metadata document sent to the server during the initial
 * handshake (driver name/version, OS details, platform string, and optional
 * wrapping-driver / application info).
 *
 * @param {object} [options]
 * @param {boolean} [options.useUnifiedTopology] reflected in the platform string
 * @param {object} [options.driverInfo] name/version/platform of a wrapping driver
 * @param {string} [options.appname] application name, truncated to 128 bytes
 * @returns {object} the handshake metadata document
 */
function makeClientMetadata(options) {
  options = options || {};

  const metadata = {
    driver: {
      name: 'nodejs',
      version: require('../../package.json').version
    },
    os: {
      type: os.type(),
      name: process.platform,
      architecture: process.arch,
      version: os.release()
    },
    // BUG FIX: `os.endianness` is a function and was previously interpolated
    // without being called, embedding its source text; a stray leading quote
    // before `Node.js` is also dropped.
    platform: `Node.js ${process.version}, ${os.endianness()} (${
      options.useUnifiedTopology ? 'unified' : 'legacy'
    })`
  };

  // support optionally provided wrapping driver info
  if (options.driverInfo) {
    if (options.driverInfo.name) {
      metadata.driver.name = `${metadata.driver.name}|${options.driverInfo.name}`;
    }

    // BUG FIX: previously assigned to `metadata.version`, creating a spurious
    // top-level key and leaving `driver.version` unmodified.
    if (options.driverInfo.version) {
      metadata.driver.version = `${metadata.driver.version}|${options.driverInfo.version}`;
    }

    if (options.driverInfo.platform) {
      metadata.platform = `${metadata.platform}|${options.driverInfo.platform}`;
    }
  }

  if (options.appname) {
    // MongoDB requires the appname not exceed a byte length of 128
    // NOTE(review): a byte-boundary slice can split a multibyte character —
    // confirm whether that matters to consumers before changing.
    const buffer = Buffer.from(options.appname);
    metadata.application = {
      name: buffer.length > 128 ? buffer.slice(0, 128).toString('utf8') : options.appname
    };
  }

  return metadata;
}
|
||||
|
||||
// Shared no-op callback for callers that don't care about a result.
const noop = () => {};

// Public utility surface of the core driver helpers defined above.
module.exports = {
  uuidV4,
  relayEvents,
  collationNotSupported,
  retrieveEJSON,
  retrieveKerberos,
  maxWireVersion,
  isPromiseLike,
  eachAsync,
  eachAsyncSeries,
  isUnifiedTopology,
  arrayStrictEqual,
  tagsStrictEqual,
  errorStrictEqual,
  makeStateMachine,
  makeClientMetadata,
  noop
};
|
||||
177
node_modules/mongodb/lib/core/wireprotocol/command.js
generated
vendored
Normal file
177
node_modules/mongodb/lib/core/wireprotocol/command.js
generated
vendored
Normal file
@@ -0,0 +1,177 @@
|
||||
'use strict';
|
||||
|
||||
const Query = require('../connection/commands').Query;
|
||||
const Msg = require('../connection/msg').Msg;
|
||||
const MongoError = require('../error').MongoError;
|
||||
const getReadPreference = require('./shared').getReadPreference;
|
||||
const isSharded = require('./shared').isSharded;
|
||||
const databaseNamespace = require('./shared').databaseNamespace;
|
||||
const isTransactionCommand = require('../transactions').isTransactionCommand;
|
||||
const applySession = require('../sessions').applySession;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const maxWireVersion = require('../utils').maxWireVersion;
|
||||
|
||||
/**
 * True-ish when the server reports a wire version and has an attached
 * autoEncrypter (i.e. client-side field level encryption is in play).
 */
function isClientEncryptionEnabled(server) {
  const wireVersion = maxWireVersion(server);
  if (!wireVersion) {
    return wireVersion;
  }
  return server.autoEncrypter;
}
|
||||
|
||||
/**
 * Runs a command against the server, routing through the auto-encryption
 * path when client-side encryption is enabled.
 *
 * @param {Server} server
 * @param {string} ns namespace the command targets
 * @param {object} cmd the command document
 * @param {object} [options] command options (may be omitted)
 * @param {function} callback invoked with `(err, result)`
 */
function command(server, ns, cmd, options, callback) {
  // Support the optional-`options` calling convention.
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }
  options = options || {};

  if (cmd == null) {
    return callback(new MongoError(`command ${JSON.stringify(cmd)} does not return a cursor`));
  }

  if (isClientEncryptionEnabled(server)) {
    // Auto-encryption is only supported on 4.2+ (wire version 8).
    const wireVersion = maxWireVersion(server);
    if (typeof wireVersion !== 'number' || wireVersion < 8) {
      callback(new MongoError('Auto-encryption requires a minimum MongoDB version of 4.2'));
      return;
    }

    _cryptCommand(server, ns, cmd, options, callback);
    return;
  }

  _command(server, ns, cmd, options, callback);
}
|
||||
|
||||
/**
 * Builds and writes a command message to the server's connection pool.
 * Handles session/clusterTime gossip, sharded read-preference wrapping for
 * legacy OP_QUERY, OP_MSG vs OP_QUERY selection, and transaction error
 * labelling before delegating the actual write to the pool.
 */
function _command(server, ns, cmd, options, callback) {
  const bson = server.s.bson;
  const pool = server.s.pool;
  const readPreference = getReadPreference(cmd, options);
  const shouldUseOpMsg = supportsOpMsg(server);
  const session = options.session;

  let clusterTime = server.clusterTime;
  // Shallow copy so the caller's `cmd` object is not mutated below.
  let finalCmd = Object.assign({}, cmd);
  if (hasSessionSupport(server) && session) {
    // NOTE(review): if `server.clusterTime` is unset while the session has a
    // clusterTime, `clusterTime.clusterTime` below would throw — confirm
    // callers guarantee a server clusterTime on this path.
    if (
      session.clusterTime &&
      session.clusterTime.clusterTime.greaterThan(clusterTime.clusterTime)
    ) {
      clusterTime = session.clusterTime;
    }

    const err = applySession(session, finalCmd, options);
    if (err) {
      return callback(err);
    }
  }

  // if we have a known cluster time, gossip it
  if (clusterTime) {
    finalCmd.$clusterTime = clusterTime;
  }

  // Legacy OP_QUERY to a mongos carries the read preference in a $query
  // wrapper; OP_MSG encodes it in the command itself, so skip when using it.
  if (isSharded(server) && !shouldUseOpMsg && readPreference && readPreference.mode !== 'primary') {
    finalCmd = {
      $query: finalCmd,
      $readPreference: readPreference.toJSON()
    };
  }

  // Caller-supplied options win over these wire-level defaults.
  const commandOptions = Object.assign(
    {
      command: true,
      numberToSkip: 0,
      numberToReturn: -1,
      checkKeys: false
    },
    options
  );

  // This value is not overridable
  commandOptions.slaveOk = readPreference.slaveOk();

  const cmdNs = `${databaseNamespace(ns)}.$cmd`;
  const message = shouldUseOpMsg
    ? new Msg(bson, cmdNs, finalCmd, commandOptions)
    : new Query(bson, cmdNs, finalCmd, commandOptions);

  const inTransaction = session && (session.inTransaction() || isTransactionCommand(finalCmd));
  const commandResponseHandler = inTransaction
    ? function(err) {
        // We need to add a TransientTransactionError errorLabel, as stated in the transaction spec.
        if (
          err &&
          err instanceof MongoNetworkError &&
          !err.hasErrorLabel('TransientTransactionError')
        ) {
          err.addErrorLabel('TransientTransactionError');
        }

        // A transient error outside of commitTransaction unpins the session
        // so the transaction can be retried elsewhere.
        if (
          !cmd.commitTransaction &&
          err &&
          err instanceof MongoError &&
          err.hasErrorLabel('TransientTransactionError')
        ) {
          session.transaction.unpinServer();
        }

        return callback.apply(null, arguments);
      }
    : callback;

  try {
    pool.write(message, commandOptions, commandResponseHandler);
  } catch (err) {
    // Surface synchronous write failures through the same handler.
    commandResponseHandler(err);
  }
}
|
||||
|
||||
/**
 * Reports whether the topology/server supports sessions, i.e. advertises a
 * maxWireVersion of at least 6 (MongoDB 3.6+), via either a unified-topology
 * `description` or a raw `ismaster` response.
 */
function hasSessionSupport(topology) {
  if (topology == null) {
    return false;
  }

  if (topology.description) {
    return topology.description.maxWireVersion >= 6;
  }

  if (topology.ismaster == null) {
    return false;
  }

  return topology.ismaster.maxWireVersion >= 6;
}
|
||||
|
||||
/**
 * Reports whether the topology/server can use the OP_MSG wire protocol:
 * wire version 6+ and not a mock test server.
 */
function supportsOpMsg(topologyOrServer) {
  // `ismaster` (raw server response) takes precedence over `description`.
  const description = topologyOrServer.ismaster || topologyOrServer.description;
  if (description == null) {
    return false;
  }

  return description.maxWireVersion >= 6 && description.__nodejs_mock_server__ == null;
}
|
||||
|
||||
/**
 * Runs a command through the auto-encryption pipeline: encrypt the command,
 * execute it via `_command`, then decrypt the server response before handing
 * it back to the caller.
 */
function _cryptCommand(server, ns, cmd, options, callback) {
  const autoEncrypter = server.autoEncrypter;

  // Decrypts the raw server response, replacing its result document in place.
  const handleResponse = (err, response) => {
    if (err || response == null) {
      callback(err, response);
      return;
    }

    autoEncrypter.decrypt(response.result, options, (decryptErr, decrypted) => {
      if (decryptErr) {
        callback(decryptErr, null);
        return;
      }

      response.result = decrypted;
      response.message.documents = [decrypted];
      callback(null, response);
    });
  };

  autoEncrypter.encrypt(ns, cmd, options, (encryptErr, encrypted) => {
    if (encryptErr) {
      callback(encryptErr, null);
      return;
    }

    _command(server, ns, encrypted, options, handleResponse);
  });
}
|
||||
|
||||
module.exports = command;
|
||||
73
node_modules/mongodb/lib/core/wireprotocol/compression.js
generated
vendored
Normal file
73
node_modules/mongodb/lib/core/wireprotocol/compression.js
generated
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
'use strict';
|
||||
|
||||
const Snappy = require('../connection/utils').retrieveSnappy();
|
||||
const zlib = require('zlib');
|
||||
|
||||
// Wire-protocol identifiers for the supported message compressors.
const compressorIDs = {
  snappy: 1,
  zlib: 2
};

// Commands that are never compressed (handshake/authentication and user
// management commands).
const uncompressibleCommands = new Set([
  'ismaster',
  'saslStart',
  'saslContinue',
  'getnonce',
  'authenticate',
  'createUser',
  'updateUser',
  'copydbSaslStart',
  'copydbgetnonce',
  'copydb'
]);
|
||||
|
||||
// Facilitate compressing a message using an agreed compressor
|
||||
/**
 * Compress `dataToBeCompressed` using the compressor agreed during the
 * handshake (`self.options.agreedCompressor`), invoking `callback` with
 * `(err, compressedBuffer)`. Throws synchronously on an unknown compressor.
 */
function compress(self, dataToBeCompressed, callback) {
  const compressor = self.options.agreedCompressor;

  if (compressor === 'snappy') {
    Snappy.compress(dataToBeCompressed, callback);
    return;
  }

  if (compressor === 'zlib') {
    // Honor a negotiated zlib compression level when one is configured.
    const zlibOptions = {};
    if (self.options.zlibCompressionLevel) {
      zlibOptions.level = self.options.zlibCompressionLevel;
    }
    zlib.deflate(dataToBeCompressed, zlibOptions, callback);
    return;
  }

  throw new Error(
    'Attempt to compress message using unknown compressor "' +
      self.options.agreedCompressor +
      '".'
  );
}
|
||||
|
||||
// Decompress a message using the given compressor
|
||||
/**
 * Decompress a message using the given compressor ID. An ID of 0 (or any ID
 * not mapped to a compressor) within the valid range is passed through
 * uncompressed; IDs outside the known range throw.
 *
 * @param {number} compressorID wire-protocol compressor identifier
 * @param {Buffer} compressedData the payload to decompress
 * @param {function} callback invoked with `(err, decompressedBuffer)`
 */
function decompress(compressorID, compressedData, callback) {
  // BUG FIX: the upper bound previously compared against
  // `compressorIDs.length`, which is `undefined` for a plain object, so the
  // unsupported-compressor guard could never fire. Compare against the
  // largest known compressor ID instead.
  const maxCompressorID = Math.max(...Object.values(compressorIDs));
  if (compressorID < 0 || compressorID > maxCompressorID) {
    throw new Error(
      'Server sent message compressed using an unsupported compressor. (Received compressor ID ' +
        compressorID +
        ')'
    );
  }
  switch (compressorID) {
    case compressorIDs.snappy:
      Snappy.uncompress(compressedData, callback);
      break;
    case compressorIDs.zlib:
      zlib.inflate(compressedData, callback);
      break;
    default:
      // ID 0: payload was not compressed — hand it back as-is.
      callback(null, compressedData);
  }
}
|
||||
|
||||
// Public wire-protocol compression API.
module.exports = {
  compressorIDs,
  uncompressibleCommands,
  compress,
  decompress
};
|
||||
13
node_modules/mongodb/lib/core/wireprotocol/constants.js
generated
vendored
Normal file
13
node_modules/mongodb/lib/core/wireprotocol/constants.js
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
'use strict';
|
||||
|
||||
// Server-version / wire-version support window advertised by this driver.
const MIN_SUPPORTED_SERVER_VERSION = '2.6';
const MAX_SUPPORTED_SERVER_VERSION = '4.4';
const MIN_SUPPORTED_WIRE_VERSION = 2;
const MAX_SUPPORTED_WIRE_VERSION = 9;

module.exports = {
  MIN_SUPPORTED_SERVER_VERSION,
  MAX_SUPPORTED_SERVER_VERSION,
  MIN_SUPPORTED_WIRE_VERSION,
  MAX_SUPPORTED_WIRE_VERSION
};
|
||||
95
node_modules/mongodb/lib/core/wireprotocol/get_more.js
generated
vendored
Normal file
95
node_modules/mongodb/lib/core/wireprotocol/get_more.js
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
'use strict';
|
||||
|
||||
const GetMore = require('../connection/commands').GetMore;
|
||||
const retrieveBSON = require('../connection/utils').retrieveBSON;
|
||||
const MongoError = require('../error').MongoError;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const BSON = retrieveBSON();
|
||||
const Long = BSON.Long;
|
||||
const collectionNamespace = require('./shared').collectionNamespace;
|
||||
const maxWireVersion = require('../utils').maxWireVersion;
|
||||
const applyCommonQueryOptions = require('./shared').applyCommonQueryOptions;
|
||||
const command = require('./command');
|
||||
|
||||
/**
 * Fetches the next batch for an open cursor, updating `cursorState` in place.
 * Uses the legacy OP_GET_MORE opcode on wire versions < 4 and the `getMore`
 * command otherwise.
 */
function getMore(server, ns, cursorState, batchSize, options, callback) {
  options = options || {};

  const wireVersion = maxWireVersion(server);
  // Shared response handler for both the legacy and command paths.
  function queryCallback(err, result) {
    if (err) return callback(err);
    const response = result.message;

    // If we have a timed out query or a cursor that was killed
    if (response.cursorNotFound) {
      return callback(new MongoNetworkError('cursor killed or timed out'), null);
    }

    // Legacy path: documents arrive directly on the reply message.
    if (wireVersion < 4) {
      const cursorId =
        typeof response.cursorId === 'number'
          ? Long.fromNumber(response.cursorId)
          : response.cursorId;

      cursorState.documents = response.documents;
      cursorState.cursorId = cursorId;

      callback(null, null, response.connection);
      return;
    }

    // We have an error detected
    if (response.documents[0].ok === 0) {
      return callback(new MongoError(response.documents[0]));
    }

    // Ensure we have a Long valid cursor id
    const cursorId =
      typeof response.documents[0].cursor.id === 'number'
        ? Long.fromNumber(response.documents[0].cursor.id)
        : response.documents[0].cursor.id;

    cursorState.documents = response.documents[0].cursor.nextBatch;
    cursorState.cursorId = cursorId;

    callback(null, response.documents[0], response.connection);
  }

  // Legacy servers: write a raw OP_GET_MORE directly to the pool.
  if (wireVersion < 4) {
    const bson = server.s.bson;
    const getMoreOp = new GetMore(bson, ns, cursorState.cursorId, { numberToReturn: batchSize });
    const queryOptions = applyCommonQueryOptions({}, cursorState);
    server.s.pool.write(getMoreOp, queryOptions, queryCallback);
    return;
  }

  // Modern servers: issue a `getMore` command with a Long cursor id.
  const cursorId =
    cursorState.cursorId instanceof Long
      ? cursorState.cursorId
      : Long.fromNumber(cursorState.cursorId);

  const getMoreCmd = {
    getMore: cursorId,
    collection: collectionNamespace(ns),
    batchSize: Math.abs(batchSize)
  };

  // Tailable cursors may block server-side up to maxAwaitTimeMS.
  if (cursorState.cmd.tailable && typeof cursorState.cmd.maxAwaitTimeMS === 'number') {
    getMoreCmd.maxTimeMS = cursorState.cmd.maxAwaitTimeMS;
  }

  const commandOptions = Object.assign(
    {
      returnFieldSelector: null,
      documentsReturnedIn: 'nextBatch'
    },
    options
  );

  if (cursorState.session) {
    commandOptions.session = cursorState.session;
  }

  command(server, ns, getMoreCmd, commandOptions, queryCallback);
}
|
||||
|
||||
module.exports = getMore;
|
||||
18
node_modules/mongodb/lib/core/wireprotocol/index.js
generated
vendored
Normal file
18
node_modules/mongodb/lib/core/wireprotocol/index.js
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
'use strict';
|
||||
const writeCommand = require('./write_command');
|
||||
|
||||
// Wire-protocol entry points. insert/update/remove are thin adapters over the
// generic write-command path; the remaining operations live in sibling modules.
module.exports = {
  insert: function insert(server, ns, ops, options, callback) {
    writeCommand(server, 'insert', 'documents', ns, ops, options, callback);
  },
  update: function update(server, ns, ops, options, callback) {
    writeCommand(server, 'update', 'updates', ns, ops, options, callback);
  },
  remove: function remove(server, ns, ops, options, callback) {
    writeCommand(server, 'delete', 'deletes', ns, ops, options, callback);
  },
  killCursors: require('./kill_cursors'),
  getMore: require('./get_more'),
  query: require('./query'),
  command: require('./command')
};
|
||||
70
node_modules/mongodb/lib/core/wireprotocol/kill_cursors.js
generated
vendored
Normal file
70
node_modules/mongodb/lib/core/wireprotocol/kill_cursors.js
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
'use strict';
|
||||
|
||||
const KillCursor = require('../connection/commands').KillCursor;
|
||||
const MongoError = require('../error').MongoError;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const collectionNamespace = require('./shared').collectionNamespace;
|
||||
const maxWireVersion = require('../utils').maxWireVersion;
|
||||
const command = require('./command');
|
||||
|
||||
/**
 * Kills the cursor identified by `cursorState.cursorId`. Uses the legacy
 * OP_KILL_CURSORS opcode on wire versions < 4 and the `killCursors` command
 * otherwise. The callback is optional.
 */
function killCursors(server, ns, cursorState, callback) {
  // Normalize to a callable so the legacy path can always pass it through.
  callback = typeof callback === 'function' ? callback : () => {};
  const cursorId = cursorState.cursorId;

  // Legacy path: fire-and-forget OP_KILL_CURSORS written straight to the pool.
  if (maxWireVersion(server) < 4) {
    const bson = server.s.bson;
    const pool = server.s.pool;
    const killCursor = new KillCursor(bson, ns, [cursorId]);
    const options = {
      immediateRelease: true,
      noResponse: true
    };

    if (typeof cursorState.session === 'object') {
      options.session = cursorState.session;
    }

    // Silently skip the write when the pool is gone or disconnected.
    if (pool && pool.isConnected()) {
      try {
        pool.write(killCursor, options, callback);
      } catch (err) {
        // NOTE(review): `callback` is always a function here due to the
        // normalization above, so the `console.warn` branch looks unreachable
        // — confirm before removing.
        if (typeof callback === 'function') {
          callback(err, null);
        } else {
          console.warn(err);
        }
      }
    }

    return;
  }

  // Modern path: issue a `killCursors` command and validate its reply.
  const killCursorCmd = {
    killCursors: collectionNamespace(ns),
    cursors: [cursorId]
  };

  const options = {};
  if (typeof cursorState.session === 'object') options.session = cursorState.session;

  command(server, ns, killCursorCmd, options, (err, result) => {
    if (err) {
      return callback(err);
    }

    const response = result.message;
    if (response.cursorNotFound) {
      return callback(new MongoNetworkError('cursor killed or timed out'), null);
    }

    if (!Array.isArray(response.documents) || response.documents.length === 0) {
      return callback(
        new MongoError(`invalid killCursors result returned for cursor id ${cursorId}`)
      );
    }

    callback(null, response.documents[0]);
  });
}
|
||||
|
||||
module.exports = killCursors;
|
||||
235
node_modules/mongodb/lib/core/wireprotocol/query.js
generated
vendored
Normal file
235
node_modules/mongodb/lib/core/wireprotocol/query.js
generated
vendored
Normal file
@@ -0,0 +1,235 @@
|
||||
'use strict';
|
||||
|
||||
const Query = require('../connection/commands').Query;
|
||||
const MongoError = require('../error').MongoError;
|
||||
const getReadPreference = require('./shared').getReadPreference;
|
||||
const collectionNamespace = require('./shared').collectionNamespace;
|
||||
const isSharded = require('./shared').isSharded;
|
||||
const maxWireVersion = require('../utils').maxWireVersion;
|
||||
const applyCommonQueryOptions = require('./shared').applyCommonQueryOptions;
|
||||
const command = require('./command');
|
||||
|
||||
/**
 * Issues the initial query for a cursor. No-ops (calls back immediately) when
 * the cursor already has an id. Uses a legacy OP_QUERY on wire versions < 4
 * and a `find` command otherwise.
 */
function query(server, ns, cmd, cursorState, options, callback) {
  options = options || {};
  // Cursor already established — nothing to do.
  if (cursorState.cursorId != null) {
    return callback();
  }

  if (cmd == null) {
    return callback(new MongoError(`command ${JSON.stringify(cmd)} does not return a cursor`));
  }

  // Legacy path: build an OP_QUERY and write it straight to the pool.
  if (maxWireVersion(server) < 4) {
    const query = prepareLegacyFindQuery(server, ns, cmd, cursorState, options);
    const queryOptions = applyCommonQueryOptions({}, cursorState);
    if (typeof query.documentsReturnedIn === 'string') {
      queryOptions.documentsReturnedIn = query.documentsReturnedIn;
    }

    server.s.pool.write(query, queryOptions, callback);
    return;
  }

  const readPreference = getReadPreference(cmd, options);
  const findCmd = prepareFindCommand(server, ns, cmd, cursorState, options);

  // NOTE: This actually modifies the passed in cmd, and our code _depends_ on this
  // side-effect. Change this ASAP
  cmd.virtual = false;

  const commandOptions = Object.assign(
    {
      documentsReturnedIn: 'firstBatch',
      numberToReturn: 1,
      slaveOk: readPreference.slaveOk()
    },
    options
  );

  if (cmd.readPreference) {
    commandOptions.readPreference = readPreference;
  }

  if (cursorState.session) {
    commandOptions.session = cursorState.session;
  }

  command(server, ns, findCmd, commandOptions, callback);
}
|
||||
|
||||
/**
 * Convert a legacy `$query`-style command spec into a modern `find` command
 * document.
 *
 * Side effect: `cursorState.batchSize` is overwritten with `cmd.batchSize`
 * when the latter is truthy.
 *
 * @param {object} server - server the command targets (unused here; kept for call-site symmetry)
 * @param {string} ns - full namespace ("db.collection")
 * @param {object} cmd - legacy find specification
 * @param {object} cursorState - cursor bookkeeping object (mutated)
 * @returns {object} the `find` command, wrapped in `{ explain: ... }` when requested
 */
function prepareFindCommand(server, ns, cmd, cursorState) {
  cursorState.batchSize = cmd.batchSize || cursorState.batchSize;

  let findCmd = { find: collectionNamespace(ns) };

  if (cmd.query) {
    // A wrapped `$query` takes precedence over the raw query document.
    findCmd.filter = cmd.query['$query'] ? cmd.query['$query'] : cmd.query;
  }

  // Normalize array-form sorts (['a', 'asc'] or [['a', 'asc'], ...]) into an object.
  let sortValue = cmd.sort;
  if (Array.isArray(sortValue)) {
    const pairs =
      sortValue.length > 0 && !Array.isArray(sortValue[0]) ? [sortValue] : sortValue;
    const sortObject = {};
    for (const pair of pairs) {
      let direction = pair[1];
      if (direction === 'asc') {
        direction = 1;
      } else if (direction === 'desc') {
        direction = -1;
      }
      sortObject[pair[0]] = direction;
    }
    sortValue = sortObject;
  }

  if (typeof cmd.allowDiskUse === 'boolean') {
    findCmd.allowDiskUse = cmd.allowDiskUse;
  }

  if (cmd.sort) findCmd.sort = sortValue;
  if (cmd.fields) findCmd.projection = cmd.fields;
  if (cmd.hint) findCmd.hint = cmd.hint;
  if (cmd.skip) findCmd.skip = cmd.skip;
  if (cmd.limit) findCmd.limit = cmd.limit;

  // A negative limit means "return one batch and close the cursor".
  if (cmd.limit < 0) {
    findCmd.limit = Math.abs(cmd.limit);
    findCmd.singleBatch = true;
  }

  if (typeof cmd.batchSize === 'number') {
    if (cmd.batchSize < 0) {
      // A negative batch size also closes the cursor after one batch, and may
      // tighten a previously requested limit.
      if (cmd.limit !== 0 && Math.abs(cmd.batchSize) < Math.abs(cmd.limit)) {
        findCmd.limit = Math.abs(cmd.batchSize);
      }
      findCmd.singleBatch = true;
    }
    findCmd.batchSize = Math.abs(cmd.batchSize);
  }

  // Straight pass-through options, copied only when truthy (legacy semantics).
  for (const key of ['comment', 'maxScan', 'maxTimeMS', 'min', 'max']) {
    if (cmd[key]) findCmd[key] = cmd[key];
  }

  findCmd.returnKey = cmd.returnKey ? cmd.returnKey : false;
  findCmd.showRecordId = cmd.showDiskLoc ? cmd.showDiskLoc : false;

  for (const key of ['snapshot', 'tailable', 'oplogReplay', 'noCursorTimeout']) {
    if (cmd[key]) findCmd[key] = cmd[key];
  }

  if (cmd.awaitData) findCmd.awaitData = cmd.awaitData;
  if (cmd.awaitdata) findCmd.awaitData = cmd.awaitdata; // legacy lowercase spelling
  if (cmd.partial) findCmd.partial = cmd.partial;
  if (cmd.collation) findCmd.collation = cmd.collation;
  if (cmd.readConcern) findCmd.readConcern = cmd.readConcern;

  // An explain request wraps the whole find command.
  if (cmd.explain) {
    findCmd = { explain: findCmd };
  }

  return findCmd;
}
|
||||
|
||||
/**
 * Build a legacy OP_QUERY wire-protocol `Query` for a find operation.
 *
 * Side effect: `cursorState.batchSize` is overwritten with `cmd.batchSize`
 * when the latter is truthy.
 *
 * @param {object} server - server the command targets (supplies the BSON serializer)
 * @param {string} ns - full namespace ("db.collection")
 * @param {object} cmd - legacy find specification
 * @param {object} cursorState - cursor bookkeeping object (mutated)
 * @param {object} [options] - serialization and read-preference overrides
 * @returns {Query} a wire-protocol query ready to be written to the connection pool
 * @throws {MongoError} when a readConcern level other than "local" is requested
 */
function prepareLegacyFindQuery(server, ns, cmd, cursorState, options) {
  options = options || {};
  const bson = server.s.bson;
  const readPreference = getReadPreference(cmd, options);
  cursorState.batchSize = cmd.batchSize || cursorState.batchSize;

  // Decide how many documents the first OP_QUERY reply should request.
  const limit = cursorState.limit;
  const batchSize = cursorState.batchSize;
  const limitWins =
    limit < 0 || (limit !== 0 && limit < batchSize) || (limit > 0 && batchSize === 0);
  let numberToReturn = limitWins ? limit : batchSize;

  const findCmd = {};
  if (isSharded(server) && readPreference) {
    findCmd['$readPreference'] = readPreference.toJSON();
  }

  if (cmd.sort) findCmd['$orderby'] = cmd.sort;
  if (cmd.hint) findCmd['$hint'] = cmd.hint;
  if (cmd.snapshot) findCmd['$snapshot'] = cmd.snapshot;
  if (cmd.returnKey !== undefined) findCmd['$returnKey'] = cmd.returnKey;
  if (cmd.maxScan) findCmd['$maxScan'] = cmd.maxScan;
  if (cmd.min) findCmd['$min'] = cmd.min;
  if (cmd.max) findCmd['$max'] = cmd.max;
  if (cmd.showDiskLoc !== undefined) findCmd['$showDiskLoc'] = cmd.showDiskLoc;
  if (cmd.comment) findCmd['$comment'] = cmd.comment;
  if (cmd.maxTimeMS) findCmd['$maxTimeMS'] = cmd.maxTimeMS;

  if (cmd.explain) {
    // nToReturn must be 0 (match all) or negative (match N and close cursor);
    // nToReturn > 0 would give explain results equivalent to limit(0).
    numberToReturn = -Math.abs(cmd.limit || 0);
    findCmd['$explain'] = true;
  }

  findCmd['$query'] = cmd.query;

  if (cmd.readConcern && cmd.readConcern.level !== 'local') {
    throw new MongoError(
      `server find command does not support a readConcern level of ${cmd.readConcern.level}`
    );
  }

  if (cmd.readConcern) {
    // Strip readConcern from a shallow copy so the caller's object is untouched.
    cmd = Object.assign({}, cmd);
    delete cmd.readConcern;
  }

  const serializeFunctions =
    typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
  const ignoreUndefined =
    typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;

  const query = new Query(bson, ns, findCmd, {
    numberToSkip: cursorState.skip || 0,
    numberToReturn: numberToReturn,
    pre32Limit: cmd.limit, // `undefined` when the caller set no limit
    checkKeys: false,
    returnFieldSelector: cmd.fields,
    serializeFunctions: serializeFunctions,
    ignoreUndefined: ignoreUndefined
  });

  // Copy explicitly boolean cursor flags onto the wire message.
  for (const flag of ['tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'partial']) {
    if (typeof cmd[flag] === 'boolean') query[flag] = cmd[flag];
  }

  query.slaveOk = readPreference.slaveOk();
  return query;
}
|
||||
|
||||
// Export the module's single entry point (the `query` function defined above).
module.exports = query;
|
||||
115
node_modules/mongodb/lib/core/wireprotocol/shared.js
generated
vendored
Normal file
115
node_modules/mongodb/lib/core/wireprotocol/shared.js
generated
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
'use strict';
|
||||
|
||||
const ReadPreference = require('../topologies/read_preference');
|
||||
const MongoError = require('../error').MongoError;
|
||||
const ServerType = require('../sdam/common').ServerType;
|
||||
const TopologyDescription = require('../sdam/topology_description').TopologyDescription;
|
||||
|
||||
// Size in bytes of the standard wire-protocol message header.
const MESSAGE_HEADER_SIZE = 16;
// Extra bytes an OP_COMPRESSED envelope adds: originalOpcode + uncompressedSize, compressorID.
const COMPRESSION_DETAILS_SIZE = 9;

// OPCODE Numbers
// Defined at https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#request-opcodes
// `const` instead of `var`: this table is never reassigned, matching the
// binding style used for every other module-level constant in this file.
const opcodes = {
  OP_REPLY: 1,
  OP_UPDATE: 2001,
  OP_INSERT: 2002,
  OP_QUERY: 2004,
  OP_GETMORE: 2005,
  OP_DELETE: 2006,
  OP_KILL_CURSORS: 2007,
  OP_COMPRESSED: 2012,
  OP_MSG: 2013
};
|
||||
|
||||
/**
 * Resolve the read preference for a command.
 *
 * Precedence: `options.readPreference`, then `cmd.readPreference`, then a
 * default of primary. String modes are promoted to `ReadPreference`
 * instances before being validated.
 *
 * @param {object} cmd - the command, possibly carrying a `readPreference`
 * @param {object} options - per-call options, possibly carrying an override
 * @returns {ReadPreference} the effective read preference
 * @throws {MongoError} when the resolved value is not a ReadPreference
 */
var getReadPreference = function(cmd, options) {
  let readPreference =
    options.readPreference || cmd.readPreference || new ReadPreference('primary');

  if (typeof readPreference === 'string') {
    readPreference = new ReadPreference(readPreference);
  }

  if (!(readPreference instanceof ReadPreference)) {
    throw new MongoError('read preference must be a ReadPreference instance');
  }

  return readPreference;
};
|
||||
|
||||
/**
 * Parse the standard 16-byte header of a wire protocol message.
 *
 * @param {Buffer} message - raw message bytes, header first
 * @returns {{length: number, requestId: number, responseTo: number, opCode: number}}
 */
var parseHeader = function(message) {
  // The header is four consecutive little-endian int32 fields.
  const fields = [0, 4, 8, 12].map(offset => message.readInt32LE(offset));
  return {
    length: fields[0],
    requestId: fields[1],
    responseTo: fields[2],
    opCode: fields[3]
  };
};
|
||||
|
||||
/**
 * Copy the query options shared by all wire-protocol reads onto
 * `queryOptions`, applying defaults for any boolean flag the caller did not
 * set explicitly.
 *
 * @param {object} queryOptions - target object (mutated and returned)
 * @param {object} options - caller-supplied options
 * @returns {object} the same `queryOptions` object, for chaining
 */
function applyCommonQueryOptions(queryOptions, options) {
  // Use an explicit boolean only when the caller provided one; otherwise fall back.
  const asBool = (value, fallback) => (typeof value === 'boolean' ? value : fallback);

  queryOptions.raw = asBool(options.raw, false);
  queryOptions.promoteLongs = asBool(options.promoteLongs, true);
  queryOptions.promoteValues = asBool(options.promoteValues, true);
  queryOptions.promoteBuffers = asBool(options.promoteBuffers, false);
  queryOptions.monitoring = asBool(options.monitoring, false);
  queryOptions.fullResult = asBool(options.fullResult, false);

  if (typeof options.socketTimeout === 'number') {
    queryOptions.socketTimeout = options.socketTimeout;
  }

  if (options.session) {
    queryOptions.session = options.session;
  }

  if (typeof options.documentsReturnedIn === 'string') {
    queryOptions.documentsReturnedIn = options.documentsReturnedIn;
  }

  return queryOptions;
}
|
||||
|
||||
/**
 * Determine whether the given server or topology is (or fronts) a mongos.
 *
 * @param {object} topologyOrServer - legacy topology, unified server, or topology
 * @returns {boolean} true when a mongos is involved
 */
function isSharded(topologyOrServer) {
  if (topologyOrServer.type === 'mongos') return true;

  const description = topologyOrServer.description;
  if (description && description.type === ServerType.Mongos) {
    return true;
  }

  // NOTE: This is incredibly inefficient, and should be removed once command
  // construction happens based on `Server` not `Topology`.
  if (description instanceof TopologyDescription) {
    for (const server of description.servers.values()) {
      if (server.type === ServerType.Mongos) return true;
    }
    return false;
  }

  return false;
}
|
||||
|
||||
/**
 * Extract the database portion of a namespace string ("db" from "db.coll").
 * A namespace without a dot is returned unchanged.
 *
 * @param {string} ns - full namespace
 * @returns {string} the database name
 */
function databaseNamespace(ns) {
  const dotIndex = ns.indexOf('.');
  return dotIndex === -1 ? ns : ns.substring(0, dotIndex);
}
|
||||
/**
 * Extract the collection portion of a namespace string ("a.b" from "db.a.b");
 * everything after the first dot belongs to the collection name.
 * A namespace without a dot yields the empty string.
 *
 * @param {string} ns - full namespace
 * @returns {string} the collection name
 */
function collectionNamespace(ns) {
  const dotIndex = ns.indexOf('.');
  return dotIndex === -1 ? '' : ns.slice(dotIndex + 1);
}
|
||||
|
||||
// Public surface of the shared wireprotocol helpers and constants,
// consumed by the command/query/write modules in this directory.
module.exports = {
  getReadPreference,
  MESSAGE_HEADER_SIZE,
  COMPRESSION_DETAILS_SIZE,
  opcodes,
  parseHeader,
  applyCommonQueryOptions,
  isSharded,
  databaseNamespace,
  collectionNamespace
};
|
||||
50
node_modules/mongodb/lib/core/wireprotocol/write_command.js
generated
vendored
Normal file
50
node_modules/mongodb/lib/core/wireprotocol/write_command.js
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
'use strict';
|
||||
|
||||
const MongoError = require('../error').MongoError;
|
||||
const collectionNamespace = require('./shared').collectionNamespace;
|
||||
const command = require('./command');
|
||||
|
||||
/**
 * Build and dispatch a generic write command (insert/update/delete).
 *
 * Note: when `options.collation` is set it is copied onto each op document
 * that does not already carry its own collation — the caller's `ops` array
 * elements are mutated.
 *
 * @param {object} server - server to issue the command against
 * @param {string} type - command name ('insert', 'update', 'delete')
 * @param {string} opsField - field holding the ops ('documents', 'updates', 'deletes')
 * @param {string} ns - full namespace ("db.collection")
 * @param {Array} ops - operation documents; must be non-empty
 * @param {object|function} options - write options, or the callback itself
 * @param {function} callback - invoked with the command result
 * @throws {MongoError} when `ops` is empty
 */
function writeCommand(server, type, opsField, ns, ops, options, callback) {
  if (ops.length === 0) throw new MongoError(`${type} must contain at least one document`);
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }
  options = options || {};

  const cmd = {
    [type]: collectionNamespace(ns),
    [opsField]: ops,
    ordered: typeof options.ordered === 'boolean' ? options.ordered : true
  };

  const writeConcern = options.writeConcern;
  if (writeConcern && Object.keys(writeConcern).length > 0) {
    cmd.writeConcern = writeConcern;
  }

  if (options.collation) {
    // Propagate the top-level collation down to any op without its own.
    for (const op of cmd[opsField]) {
      if (!op.collation) {
        op.collation = options.collation;
      }
    }
  }

  if (options.bypassDocumentValidation === true) {
    cmd.bypassDocumentValidation = options.bypassDocumentValidation;
  }

  // Only inserts have their document keys validated by the serializer.
  const commandOptions = Object.assign(
    { checkKeys: type === 'insert', numberToReturn: 1 },
    options
  );

  command(server, ns, cmd, commandOptions, callback);
}
|
||||
|
||||
// Expose the generic write-command builder shared by insert/update/delete operations.
module.exports = writeCommand;
|
||||
Reference in New Issue
Block a user