Initial commit
This commit is contained in:
67
node_modules/mongodb/lib/core/sdam/common.js
generated
vendored
Normal file
67
node_modules/mongodb/lib/core/sdam/common.js
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
'use strict';
|
||||
|
||||
// shared state names
// Lifecycle states shared by the Monitor, Server, and Topology state machines.
const STATE_CLOSING = 'closing';
const STATE_CLOSED = 'closed';
const STATE_CONNECTING = 'connecting';
const STATE_CONNECTED = 'connected';
|
||||
|
||||
// An enumeration of topology types we know about
const TopologyType = {
  Single: 'Single',
  ReplicaSetNoPrimary: 'ReplicaSetNoPrimary',
  ReplicaSetWithPrimary: 'ReplicaSetWithPrimary',
  Sharded: 'Sharded',
  Unknown: 'Unknown'
};

// An enumeration of server types we know about
const ServerType = {
  Standalone: 'Standalone',
  Mongos: 'Mongos',
  PossiblePrimary: 'PossiblePrimary',
  RSPrimary: 'RSPrimary',
  RSSecondary: 'RSSecondary',
  RSArbiter: 'RSArbiter',
  RSOther: 'RSOther',
  RSGhost: 'RSGhost',
  Unknown: 'Unknown'
};

/**
 * Helper to get a server's type that works for both legacy and unified topologies.
 *
 * @param {Object} server a server or topology-like object carrying a description
 * @returns {String} one of the ServerType values
 */
function serverType(server) {
  // unified topology stores its description at `s.description`,
  // the legacy topology at `s.serverDescription` (never reassigned: const)
  const description = server.s.description || server.s.serverDescription;

  // for a Single topology the type lives on the lone server's description
  if (description.topologyType === TopologyType.Single) return description.servers[0].type;
  return description.type;
}
|
||||
|
||||
// Default timing/behavior options applied when constructing a Topology.
// Values follow the SDAM specification defaults (times in milliseconds).
const TOPOLOGY_DEFAULTS = {
  useUnifiedTopology: true,
  localThresholdMS: 15,
  serverSelectionTimeoutMS: 30000,
  heartbeatFrequencyMS: 10000,
  minHeartbeatFrequencyMS: 500
};
|
||||
|
||||
/**
 * Cancel every pending timer held in `queue` and empty the queue.
 *
 * @param {Set} queue a set of timer handles created with setTimeout
 */
function drainTimerQueue(queue) {
  for (const timer of queue) {
    clearTimeout(timer);
  }

  queue.clear();
}
|
||||
|
||||
/**
 * Cancel a single timer and remove it from a timer set.
 *
 * @param {Object} timer the timer handle to cancel
 * @param {Set} timers the set tracking the timer
 * @returns {Boolean} true if the timer was present in (and removed from) the set
 */
function clearAndRemoveTimerFrom(timer, timers) {
  clearTimeout(timer);
  const wasTracked = timers.delete(timer);
  return wasTracked;
}
|
||||
|
||||
// Public API of the shared SDAM helpers: lifecycle state names, type
// enumerations, topology defaults, and timer bookkeeping utilities.
module.exports = {
  STATE_CLOSING,
  STATE_CLOSED,
  STATE_CONNECTING,
  STATE_CONNECTED,
  TOPOLOGY_DEFAULTS,
  TopologyType,
  ServerType,
  serverType,
  drainTimerQueue,
  clearAndRemoveTimerFrom
};
|
||||
124
node_modules/mongodb/lib/core/sdam/events.js
generated
vendored
Normal file
124
node_modules/mongodb/lib/core/sdam/events.js
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
'use strict';
|
||||
|
||||
/**
 * Published when server description changes, but does NOT include changes to the RTT.
 *
 * @property {Object} topologyId A unique identifier for the topology
 * @property {ServerAddress} address The address (host/port pair) of the server
 * @property {ServerDescription} previousDescription The previous server description
 * @property {ServerDescription} newDescription The new server description
 */
class ServerDescriptionChangedEvent {
  constructor(topologyId, address, previousDescription, newDescription) {
    this.topologyId = topologyId;
    this.address = address;
    this.previousDescription = previousDescription;
    this.newDescription = newDescription;
  }
}
|
||||
|
||||
/**
 * Published when server is initialized.
 *
 * @property {Object} topologyId A unique identifier for the topology
 * @property {ServerAddress} address The address (host/port pair) of the server
 */
class ServerOpeningEvent {
  constructor(topologyId, address) {
    this.topologyId = topologyId;
    this.address = address;
  }
}
|
||||
|
||||
/**
 * Published when server is closed.
 *
 * @property {ServerAddress} address The address (host/port pair) of the server
 * @property {Object} topologyId A unique identifier for the topology
 */
class ServerClosedEvent {
  constructor(topologyId, address) {
    this.topologyId = topologyId;
    this.address = address;
  }
}
|
||||
|
||||
/**
 * Published when topology description changes.
 *
 * @property {Object} topologyId
 * @property {TopologyDescription} previousDescription The old topology description
 * @property {TopologyDescription} newDescription The new topology description
 */
class TopologyDescriptionChangedEvent {
  constructor(topologyId, previousDescription, newDescription) {
    this.topologyId = topologyId;
    this.previousDescription = previousDescription;
    this.newDescription = newDescription;
  }
}
|
||||
|
||||
/**
 * Published when topology is initialized.
 *
 * @param {Object} topologyId A unique identifier for the topology
 */
class TopologyOpeningEvent {
  constructor(topologyId) {
    this.topologyId = topologyId;
  }
}
|
||||
|
||||
/**
 * Published when topology is closed.
 *
 * @param {Object} topologyId A unique identifier for the topology
 */
class TopologyClosedEvent {
  constructor(topologyId) {
    this.topologyId = topologyId;
  }
}
|
||||
|
||||
/**
 * Fired when the server monitor’s ismaster command is started - immediately before
 * the ismaster command is serialized into raw BSON and written to the socket.
 *
 * @property {Object} connectionId The connection id for the command
 */
class ServerHeartbeatStartedEvent {
  constructor(connectionId) {
    this.connectionId = connectionId;
  }
}
|
||||
|
||||
/**
 * Fired when the server monitor’s ismaster succeeds.
 *
 * @param {Number} duration The execution time of the event in ms
 * @param {Object} reply The command reply
 * @param {Object} connectionId The connection id for the command
 */
class ServerHeartbeatSucceededEvent {
  constructor(duration, reply, connectionId) {
    // assignment order matches the original `{ connectionId, duration, reply }`
    this.connectionId = connectionId;
    this.duration = duration;
    this.reply = reply;
  }
}
|
||||
|
||||
/**
 * Fired when the server monitor’s ismaster fails, either with an “ok: 0” or a socket exception.
 *
 * @param {Number} duration The execution time of the event in ms
 * @param {MongoError|Object} failure The command failure
 * @param {Object} connectionId The connection id for the command
 */
class ServerHeartbeatFailedEvent {
  constructor(duration, failure, connectionId) {
    // assignment order matches the original `{ connectionId, duration, failure }`
    this.connectionId = connectionId;
    this.duration = duration;
    this.failure = failure;
  }
}
|
||||
|
||||
// All SDAM monitoring event classes published by the topology/server/monitor.
module.exports = {
  ServerDescriptionChangedEvent,
  ServerOpeningEvent,
  ServerClosedEvent,
  TopologyDescriptionChangedEvent,
  TopologyOpeningEvent,
  TopologyClosedEvent,
  ServerHeartbeatStartedEvent,
  ServerHeartbeatSucceededEvent,
  ServerHeartbeatFailedEvent
};
|
||||
405
node_modules/mongodb/lib/core/sdam/monitor.js
generated
vendored
Normal file
405
node_modules/mongodb/lib/core/sdam/monitor.js
generated
vendored
Normal file
@@ -0,0 +1,405 @@
|
||||
'use strict';
|
||||
|
||||
const ServerType = require('./common').ServerType;
|
||||
const EventEmitter = require('events');
|
||||
const connect = require('../connection/connect');
|
||||
const Connection = require('../../cmap/connection').Connection;
|
||||
const common = require('./common');
|
||||
const makeStateMachine = require('../utils').makeStateMachine;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const BSON = require('../connection/utils').retrieveBSON();
|
||||
const makeInterruptableAsyncInterval = require('../../utils').makeInterruptableAsyncInterval;
|
||||
const calculateDurationInMs = require('../../utils').calculateDurationInMs;
|
||||
const now = require('../../utils').now;
|
||||
|
||||
const sdamEvents = require('./events');
|
||||
const ServerHeartbeatStartedEvent = sdamEvents.ServerHeartbeatStartedEvent;
|
||||
const ServerHeartbeatSucceededEvent = sdamEvents.ServerHeartbeatSucceededEvent;
|
||||
const ServerHeartbeatFailedEvent = sdamEvents.ServerHeartbeatFailedEvent;
|
||||
|
||||
// Symbols used to hide internal state on Monitor / RTTPinger instances.
const kServer = Symbol('server');
const kMonitorId = Symbol('monitorId');
const kConnection = Symbol('connection');
const kCancellationToken = Symbol('cancellationToken');
const kRTTPinger = Symbol('rttPinger');
const kRoundTripTime = Symbol('roundTripTime');

// Monitor lifecycle states and the legal transitions between them.
const STATE_CLOSED = common.STATE_CLOSED;
const STATE_CLOSING = common.STATE_CLOSING;
const STATE_IDLE = 'idle';
const STATE_MONITORING = 'monitoring';
const stateTransition = makeStateMachine({
  [STATE_CLOSING]: [STATE_CLOSING, STATE_IDLE, STATE_CLOSED],
  [STATE_CLOSED]: [STATE_CLOSED, STATE_MONITORING],
  [STATE_IDLE]: [STATE_IDLE, STATE_MONITORING, STATE_CLOSING],
  [STATE_MONITORING]: [STATE_MONITORING, STATE_IDLE, STATE_CLOSING]
});

// States in which an explicit requestCheck() is a no-op (already checking, or shutting down).
const INVALID_REQUEST_CHECK_STATES = new Set([STATE_CLOSING, STATE_CLOSED, STATE_MONITORING]);
|
||||
|
||||
/**
 * Determine whether a monitor is shutting down or already shut down.
 *
 * @param {Monitor} monitor
 * @returns {Boolean} true when the monitor state is `closing` or `closed`
 */
function isInCloseState(monitor) {
  const state = monitor.s.state;
  return state === STATE_CLOSED || state === STATE_CLOSING;
}
|
||||
|
||||
/**
 * Monitors a single server via periodic `ismaster` checks on a dedicated
 * connection, emitting heartbeat events as results arrive.
 */
class Monitor extends EventEmitter {
  /**
   * @param {Server} server the server to monitor
   * @param {Object} options timing options (connect/heartbeat frequencies)
   */
  constructor(server, options) {
    super(options);

    this[kServer] = server;
    this[kConnection] = undefined;
    this[kCancellationToken] = new EventEmitter();
    // many in-flight operations may subscribe to cancellation; avoid listener warnings
    this[kCancellationToken].setMaxListeners(Infinity);
    this[kMonitorId] = null;
    this.s = {
      state: STATE_CLOSED
    };

    this.address = server.description.address;
    // normalize timing options; `connectionTimeout` is the legacy spelling of `connectTimeoutMS`
    this.options = Object.freeze({
      connectTimeoutMS:
        typeof options.connectionTimeout === 'number'
          ? options.connectionTimeout
          : typeof options.connectTimeoutMS === 'number'
          ? options.connectTimeoutMS
          : 10000,
      heartbeatFrequencyMS:
        typeof options.heartbeatFrequencyMS === 'number' ? options.heartbeatFrequencyMS : 10000,
      minHeartbeatFrequencyMS:
        typeof options.minHeartbeatFrequencyMS === 'number' ? options.minHeartbeatFrequencyMS : 500
    });

    // TODO: refactor this to pull it directly from the pool, requires new ConnectionPool integration
    const connectOptions = Object.assign(
      {
        id: '<monitor>',
        host: server.description.host,
        port: server.description.port,
        bson: server.s.bson,
        connectionType: Connection
      },
      server.s.options,
      this.options,

      // force BSON serialization options
      {
        raw: false,
        promoteLongs: true,
        promoteValues: true,
        promoteBuffers: true
      }
    );

    // ensure no authentication is used for monitoring
    delete connectOptions.credentials;
    this.connectOptions = Object.freeze(connectOptions);
  }

  /** Begin monitoring; a no-op unless the monitor is currently closed. */
  connect() {
    if (this.s.state !== STATE_CLOSED) {
      return;
    }

    // start
    const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS;
    const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS;
    // `immediate: true` triggers the first check right away
    this[kMonitorId] = makeInterruptableAsyncInterval(monitorServer(this), {
      interval: heartbeatFrequencyMS,
      minInterval: minHeartbeatFrequencyMS,
      immediate: true
    });
  }

  /** Request an immediate check; ignored while closing/closed or already monitoring. */
  requestCheck() {
    if (INVALID_REQUEST_CHECK_STATES.has(this.s.state)) {
      return;
    }

    this[kMonitorId].wake();
  }

  /** Tear down all monitoring resources and restart the monitoring interval. */
  reset() {
    if (isInCloseState(this)) {
      return;
    }

    stateTransition(this, STATE_CLOSING);
    resetMonitorState(this);

    // restart monitor
    stateTransition(this, STATE_IDLE);

    // restart monitoring
    const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS;
    const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS;
    this[kMonitorId] = makeInterruptableAsyncInterval(monitorServer(this), {
      interval: heartbeatFrequencyMS,
      minInterval: minHeartbeatFrequencyMS
    });
  }

  /** Permanently stop monitoring and emit `close`. */
  close() {
    if (isInCloseState(this)) {
      return;
    }

    stateTransition(this, STATE_CLOSING);
    resetMonitorState(this);

    // close monitor
    this.emit('close');
    stateTransition(this, STATE_CLOSED);
  }
}
|
||||
|
||||
/**
 * Tear down all monitoring resources for a monitor: the scheduled check
 * interval, the RTT pinger, any in-flight operations (via the cancellation
 * token), and the monitoring connection. Callers transition out of
 * STATE_CLOSING afterwards as appropriate.
 */
function resetMonitorState(monitor) {
  stateTransition(monitor, STATE_CLOSING);
  if (monitor[kMonitorId]) {
    monitor[kMonitorId].stop();
    monitor[kMonitorId] = null;
  }

  if (monitor[kRTTPinger]) {
    monitor[kRTTPinger].close();
    monitor[kRTTPinger] = undefined;
  }

  // abort any in-flight connect/command that is listening on the token
  monitor[kCancellationToken].emit('cancel');
  // NOTE(review): this branch appears dead — kMonitorId was nulled above and
  // nothing between sets it again; left as-is to preserve behavior
  if (monitor[kMonitorId]) {
    clearTimeout(monitor[kMonitorId]);
    monitor[kMonitorId] = undefined;
  }

  if (monitor[kConnection]) {
    monitor[kConnection].destroy({ force: true });
  }
}
|
||||
|
||||
/**
 * Perform a single heartbeat check against the monitored server, emitting
 * serverHeartbeatStarted / Succeeded / Failed events along the way.
 *
 * Uses the streaming ("awaitable") ismaster protocol when the server last
 * reported a topologyVersion, otherwise a plain ismaster. When no monitoring
 * connection exists yet, establishing one performs an implicit ismaster.
 *
 * @param {Monitor} monitor
 * @param {Function} callback invoked with (err) or (undefined, isMaster)
 */
function checkServer(monitor, callback) {
  let start = now();
  monitor.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(monitor.address));

  // tear down the monitoring connection and surface the failure
  function failureHandler(err) {
    if (monitor[kConnection]) {
      monitor[kConnection].destroy({ force: true });
      monitor[kConnection] = undefined;
    }

    monitor.emit(
      'serverHeartbeatFailed',
      new ServerHeartbeatFailedEvent(calculateDurationInMs(start), err, monitor.address)
    );

    monitor.emit('resetServer', err);
    monitor.emit('resetConnectionPool');
    callback(err);
  }

  if (monitor[kConnection] != null && !monitor[kConnection].closed) {
    const connectTimeoutMS = monitor.options.connectTimeoutMS;
    const maxAwaitTimeMS = monitor.options.heartbeatFrequencyMS;
    const topologyVersion = monitor[kServer].description.topologyVersion;
    // a known topologyVersion means the server supports the streaming protocol
    const isAwaitable = topologyVersion != null;

    const cmd = isAwaitable
      ? { ismaster: true, maxAwaitTimeMS, topologyVersion: makeTopologyVersion(topologyVersion) }
      : { ismaster: true };

    // awaitable checks may block server-side up to maxAwaitTimeMS, so pad the socket timeout
    const options = isAwaitable
      ? { socketTimeout: connectTimeoutMS + maxAwaitTimeMS, exhaustAllowed: true }
      : { socketTimeout: connectTimeoutMS };

    // streaming checks cannot measure RTT themselves; use a dedicated RTT pinger
    if (isAwaitable && monitor[kRTTPinger] == null) {
      monitor[kRTTPinger] = new RTTPinger(monitor[kCancellationToken], monitor.connectOptions);
    }

    monitor[kConnection].command('admin.$cmd', cmd, options, (err, result) => {
      if (err) {
        failureHandler(err);
        return;
      }

      const isMaster = result.result;
      // streaming mode reports the pinger's RTT; polling mode measures this check
      const duration = isAwaitable
        ? monitor[kRTTPinger].roundTripTime
        : calculateDurationInMs(start);

      monitor.emit(
        'serverHeartbeatSucceeded',
        new ServerHeartbeatSucceededEvent(duration, isMaster, monitor.address)
      );

      // if we are using the streaming protocol then we immediately issue another `started`
      // event, otherwise the "check" is complete and return to the main monitor loop
      if (isAwaitable && isMaster.topologyVersion) {
        monitor.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(monitor.address));
        start = now();
      } else {
        if (monitor[kRTTPinger]) {
          monitor[kRTTPinger].close();
          monitor[kRTTPinger] = undefined;
        }

        callback(undefined, isMaster);
      }
    });

    return;
  }

  // connecting does an implicit `ismaster`
  connect(monitor.connectOptions, monitor[kCancellationToken], (err, conn) => {
    // monitor was closed while connecting: discard the new connection
    if (conn && isInCloseState(monitor)) {
      conn.destroy({ force: true });
      return;
    }

    if (err) {
      monitor[kConnection] = undefined;

      // we already reset the connection pool on network errors in all cases
      if (!(err instanceof MongoNetworkError)) {
        monitor.emit('resetConnectionPool');
      }

      failureHandler(err);
      return;
    }

    monitor[kConnection] = conn;
    monitor.emit(
      'serverHeartbeatSucceeded',
      new ServerHeartbeatSucceededEvent(
        calculateDurationInMs(start),
        conn.ismaster,
        monitor.address
      )
    );

    callback(undefined, conn.ismaster);
  });
}
|
||||
|
||||
/**
 * Build the function executed on each monitoring interval tick. It moves the
 * monitor into STATE_MONITORING, runs one check, then returns to STATE_IDLE
 * (unless the monitor is closing).
 *
 * @param {Monitor} monitor
 * @returns {Function} a callback-taking task for makeInterruptableAsyncInterval
 */
function monitorServer(monitor) {
  return callback => {
    stateTransition(monitor, STATE_MONITORING);
    // return to idle (if still running) and signal the interval we're done
    function done() {
      if (!isInCloseState(monitor)) {
        stateTransition(monitor, STATE_IDLE);
      }

      callback();
    }

    // TODO: the next line is a legacy event, remove in v4
    process.nextTick(() => monitor.emit('monitoring', monitor[kServer]));

    checkServer(monitor, (err, isMaster) => {
      if (err) {
        // otherwise an error occured on initial discovery, also bail
        if (monitor[kServer].description.type === ServerType.Unknown) {
          monitor.emit('resetServer', err);
          return done();
        }
      }

      // if the check indicates streaming is supported, immediately reschedule monitoring
      if (isMaster && isMaster.topologyVersion) {
        setTimeout(() => {
          if (!isInCloseState(monitor)) {
            monitor[kMonitorId].wake();
          }
        });
      }

      done();
    });
  };
}
|
||||
|
||||
/**
 * Convert a plain topologyVersion document into the BSON-typed form expected
 * by the server (the counter must be a 64-bit Long).
 *
 * @param {Object} tv a topologyVersion with `processId` and numeric `counter`
 * @returns {Object} { processId, counter: BSON.Long }
 */
function makeTopologyVersion(tv) {
  const processId = tv.processId;
  const counter = BSON.Long.fromNumber(tv.counter);
  return { processId, counter };
}
|
||||
|
||||
/**
 * Maintains a rolling round-trip-time measurement for a server on a dedicated
 * connection. Used while the streaming heartbeat protocol is active, since
 * streaming checks cannot measure RTT themselves.
 */
class RTTPinger {
  /**
   * @param {EventEmitter} cancellationToken emits 'cancel' to abort in-flight connects
   * @param {Object} options connect options, including heartbeatFrequencyMS
   */
  constructor(cancellationToken, options) {
    this[kConnection] = null;
    this[kCancellationToken] = cancellationToken;
    this[kRoundTripTime] = 0;
    this.closed = false;

    // schedule the first measurement one heartbeat interval out
    const heartbeatFrequencyMS = options.heartbeatFrequencyMS;
    this[kMonitorId] = setTimeout(() => measureRoundTripTime(this, options), heartbeatFrequencyMS);
  }

  /** The most recent round trip time measurement, in milliseconds. */
  get roundTripTime() {
    return this[kRoundTripTime];
  }

  /** Stop measuring and destroy the dedicated connection. */
  close() {
    this.closed = true;

    clearTimeout(this[kMonitorId]);
    this[kMonitorId] = undefined;

    if (this[kConnection]) {
      this[kConnection].destroy({ force: true });
    }
  }
}
|
||||
|
||||
/**
 * Measure the round trip time to the server with an `ismaster` command on the
 * pinger's dedicated connection, record it, and reschedule the next
 * measurement after `heartbeatFrequencyMS`.
 *
 * @param {RTTPinger} rttPinger holds the connection and RTT state
 * @param {Object} options connect options, including heartbeatFrequencyMS
 */
function measureRoundTripTime(rttPinger, options) {
  const start = now();
  const cancellationToken = rttPinger[kCancellationToken];
  const heartbeatFrequencyMS = options.heartbeatFrequencyMS;
  if (rttPinger.closed) {
    return;
  }

  // record the measurement and schedule the next ping
  function measureAndReschedule(conn) {
    if (rttPinger.closed) {
      // BUGFIX: the command path below calls measureAndReschedule() with no
      // argument, so `conn` may be undefined here — guard before destroying
      if (conn) {
        conn.destroy({ force: true });
      }

      return;
    }

    if (rttPinger[kConnection] == null) {
      rttPinger[kConnection] = conn;
    }

    rttPinger[kRoundTripTime] = calculateDurationInMs(start);
    rttPinger[kMonitorId] = setTimeout(
      () => measureRoundTripTime(rttPinger, options),
      heartbeatFrequencyMS
    );
  }

  // no connection yet: establish one (connect performs an implicit ismaster)
  if (rttPinger[kConnection] == null) {
    connect(options, cancellationToken, (err, conn) => {
      if (err) {
        // reset state on failure; a later tick will retry the connect
        rttPinger[kConnection] = undefined;
        rttPinger[kRoundTripTime] = 0;
        return;
      }

      measureAndReschedule(conn);
    });

    return;
  }

  rttPinger[kConnection].command('admin.$cmd', { ismaster: 1 }, err => {
    if (err) {
      rttPinger[kConnection] = undefined;
      rttPinger[kRoundTripTime] = 0;
      return;
    }

    measureAndReschedule();
  });
}
|
||||
|
||||
// Public API: only Monitor is exported; RTTPinger is an internal helper.
module.exports = {
  Monitor
};
|
||||
564
node_modules/mongodb/lib/core/sdam/server.js
generated
vendored
Normal file
564
node_modules/mongodb/lib/core/sdam/server.js
generated
vendored
Normal file
@@ -0,0 +1,564 @@
|
||||
'use strict';
|
||||
const EventEmitter = require('events');
|
||||
const ConnectionPool = require('../../cmap/connection_pool').ConnectionPool;
|
||||
const CMAP_EVENT_NAMES = require('../../cmap/events').CMAP_EVENT_NAMES;
|
||||
const MongoError = require('../error').MongoError;
|
||||
const relayEvents = require('../utils').relayEvents;
|
||||
const BSON = require('../connection/utils').retrieveBSON();
|
||||
const Logger = require('../connection/logger');
|
||||
const ServerDescription = require('./server_description').ServerDescription;
|
||||
const compareTopologyVersion = require('./server_description').compareTopologyVersion;
|
||||
const ReadPreference = require('../topologies/read_preference');
|
||||
const Monitor = require('./monitor').Monitor;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const MongoNetworkTimeoutError = require('../error').MongoNetworkTimeoutError;
|
||||
const collationNotSupported = require('../utils').collationNotSupported;
|
||||
const debugOptions = require('../connection/utils').debugOptions;
|
||||
const isSDAMUnrecoverableError = require('../error').isSDAMUnrecoverableError;
|
||||
const isRetryableWriteError = require('../error').isRetryableWriteError;
|
||||
const isNodeShuttingDownError = require('../error').isNodeShuttingDownError;
|
||||
const isNetworkErrorBeforeHandshake = require('../error').isNetworkErrorBeforeHandshake;
|
||||
const maxWireVersion = require('../utils').maxWireVersion;
|
||||
const makeStateMachine = require('../utils').makeStateMachine;
|
||||
const common = require('./common');
|
||||
const ServerType = common.ServerType;
|
||||
const isTransactionCommand = require('../transactions').isTransactionCommand;
|
||||
|
||||
// Used for filtering out fields for logging
// (only these option keys are included in debug log output; see Server#command)
const DEBUG_FIELDS = [
  'reconnect',
  'reconnectTries',
  'reconnectInterval',
  'emitError',
  'cursorFactory',
  'host',
  'port',
  'size',
  'keepAlive',
  'keepAliveInitialDelay',
  'noDelay',
  'connectionTimeout',
  'checkServerIdentity',
  'socketTimeout',
  'ssl',
  'ca',
  'crl',
  'cert',
  'key',
  'rejectUnauthorized',
  'promoteLongs',
  'promoteValues',
  'promoteBuffers',
  'servername'
];
|
||||
|
||||
// Server lifecycle states (shared SDAM names) and the legal transitions.
const STATE_CLOSING = common.STATE_CLOSING;
const STATE_CLOSED = common.STATE_CLOSED;
const STATE_CONNECTING = common.STATE_CONNECTING;
const STATE_CONNECTED = common.STATE_CONNECTED;
const stateTransition = makeStateMachine({
  [STATE_CLOSED]: [STATE_CLOSED, STATE_CONNECTING],
  [STATE_CONNECTING]: [STATE_CONNECTING, STATE_CLOSING, STATE_CONNECTED, STATE_CLOSED],
  [STATE_CONNECTED]: [STATE_CONNECTED, STATE_CLOSING, STATE_CLOSED],
  [STATE_CLOSING]: [STATE_CLOSING, STATE_CLOSED]
});

// Symbol hiding the server's Monitor instance.
const kMonitor = Symbol('monitor');
|
||||
|
||||
/**
 * Represents a single monitored server: owns a connection pool for operations
 * and a Monitor for heartbeats, relaying their events.
 *
 * @fires Server#serverHeartbeatStarted
 * @fires Server#serverHeartbeatSucceeded
 * @fires Server#serverHeartbeatFailed
 */
class Server extends EventEmitter {
  /**
   * Create a server
   *
   * @param {ServerDescription} description
   * @param {Object} options
   * @param {Object} topology the owning topology (stores shared clusterTime)
   */
  constructor(description, options, topology) {
    super();

    this.s = {
      // the server description
      description,
      // a saved copy of the incoming options
      options,
      // the server logger
      logger: Logger('Server', options),
      // the bson parser
      bson:
        options.bson ||
        new BSON([
          BSON.Binary,
          BSON.Code,
          BSON.DBRef,
          BSON.Decimal128,
          BSON.Double,
          BSON.Int32,
          BSON.Long,
          BSON.Map,
          BSON.MaxKey,
          BSON.MinKey,
          BSON.ObjectId,
          BSON.BSONRegExp,
          BSON.Symbol,
          BSON.Timestamp
        ]),
      // the server state
      state: STATE_CLOSED,
      credentials: options.credentials,
      topology
    };

    // create the connection pool
    // NOTE: this used to happen in `connect`, we supported overriding pool options there
    const poolOptions = Object.assign(
      { host: this.description.host, port: this.description.port, bson: this.s.bson },
      options
    );

    this.s.pool = new ConnectionPool(poolOptions);
    // re-emit pool command monitoring and CMAP events from this server
    relayEvents(
      this.s.pool,
      this,
      ['commandStarted', 'commandSucceeded', 'commandFailed'].concat(CMAP_EVENT_NAMES)
    );

    this.s.pool.on('clusterTimeReceived', clusterTime => {
      this.clusterTime = clusterTime;
    });

    // create the monitor
    this[kMonitor] = new Monitor(this, this.s.options);
    relayEvents(this[kMonitor], this, [
      'serverHeartbeatStarted',
      'serverHeartbeatSucceeded',
      'serverHeartbeatFailed',

      // legacy events
      'monitoring'
    ]);

    this[kMonitor].on('resetConnectionPool', () => {
      this.s.pool.clear();
    });

    this[kMonitor].on('resetServer', error => markServerUnknown(this, error));
    this[kMonitor].on('serverHeartbeatSucceeded', event => {
      // fold the heartbeat reply into a fresh description with a smoothed RTT
      this.emit(
        'descriptionReceived',
        new ServerDescription(this.description.address, event.reply, {
          roundTripTime: calculateRoundTripTime(this.description.roundTripTime, event.duration)
        })
      );

      // first successful heartbeat completes the connect sequence
      if (this.s.state === STATE_CONNECTING) {
        stateTransition(this, STATE_CONNECTED);
        this.emit('connect', this);
      }
    });
  }

  // the current ServerDescription
  get description() {
    return this.s.description;
  }

  // the server's address ("host:port"), used as its display name
  get name() {
    return this.s.description.address;
  }

  // the auto-encrypter, when client-side field level encryption is configured
  get autoEncrypter() {
    if (this.s.options && this.s.options.autoEncrypter) {
      return this.s.options.autoEncrypter;
    }
    return null;
  }

  /**
   * Initiate server connect
   */
  connect() {
    // only a fully closed server may begin connecting
    if (this.s.state !== STATE_CLOSED) {
      return;
    }

    stateTransition(this, STATE_CONNECTING);
    this[kMonitor].connect();
  }

  /**
   * Destroy the server connection
   *
   * @param {object} [options] Optional settings
   * @param {Boolean} [options.force=false] Force destroy the pool
   */
  destroy(options, callback) {
    if (typeof options === 'function') (callback = options), (options = {});
    options = Object.assign({}, { force: false }, options);

    if (this.s.state === STATE_CLOSED) {
      if (typeof callback === 'function') {
        callback();
      }

      return;
    }

    stateTransition(this, STATE_CLOSING);

    // stop heartbeats first, then drain the pool
    this[kMonitor].close();
    this.s.pool.close(options, err => {
      stateTransition(this, STATE_CLOSED);
      this.emit('closed');
      if (typeof callback === 'function') {
        callback(err);
      }
    });
  }

  /**
   * Immediately schedule monitoring of this server. If there already an attempt being made
   * this will be a no-op.
   */
  requestCheck() {
    this[kMonitor].requestCheck();
  }

  /**
   * Execute a command
   *
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {object} cmd The command hash
   * @param {object} [options] Optional settings
   * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.checkKeys=false] Specify if the bson parser should validate keys.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {Boolean} [options.fullResult=false] Return the full envelope instead of just the result document.
   * @param {ClientSession} [options.session] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  command(ns, cmd, options, callback) {
    if (typeof options === 'function') {
      (callback = options), (options = {}), (options = options || {});
    }

    if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
      callback(new MongoError('server is closed'));
      return;
    }

    const error = basicReadValidations(this, options);
    if (error) {
      return callback(error);
    }

    // Clone the options
    options = Object.assign({}, options, { wireProtocolCommand: false });

    // Debug log
    if (this.s.logger.isDebug()) {
      this.s.logger.debug(
        `executing command [${JSON.stringify({
          ns,
          cmd,
          options: debugOptions(DEBUG_FIELDS, options)
        })}] against ${this.name}`
      );
    }

    // error if collation not supported
    if (collationNotSupported(this, cmd)) {
      callback(new MongoError(`server ${this.name} does not support collation`));
      return;
    }

    this.s.pool.withConnection((err, conn, cb) => {
      if (err) {
        // a checkout failure means we could not reach the server
        markServerUnknown(this, err);
        return cb(err);
      }

      conn.command(ns, cmd, options, makeOperationHandler(this, conn, cmd, options, cb));
    }, callback);
  }

  /**
   * Execute a query against the server
   *
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {object} cmd The command document for the query
   * @param {object} options Optional settings
   * @param {function} callback
   */
  query(ns, cmd, cursorState, options, callback) {
    if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
      callback(new MongoError('server is closed'));
      return;
    }

    this.s.pool.withConnection((err, conn, cb) => {
      if (err) {
        markServerUnknown(this, err);
        return cb(err);
      }

      conn.query(ns, cmd, cursorState, options, makeOperationHandler(this, conn, cmd, options, cb));
    }, callback);
  }

  /**
   * Execute a `getMore` against the server
   *
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {object} cursorState State data associated with the cursor calling this method
   * @param {object} options Optional settings
   * @param {function} callback
   */
  getMore(ns, cursorState, batchSize, options, callback) {
    if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
      callback(new MongoError('server is closed'));
      return;
    }

    this.s.pool.withConnection((err, conn, cb) => {
      if (err) {
        markServerUnknown(this, err);
        return cb(err);
      }

      conn.getMore(
        ns,
        cursorState,
        batchSize,
        options,
        makeOperationHandler(this, conn, null, options, cb)
      );
    }, callback);
  }

  /**
   * Execute a `killCursors` command against the server
   *
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {object} cursorState State data associated with the cursor calling this method
   * @param {function} callback
   */
  killCursors(ns, cursorState, callback) {
    if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
      // callback is optional here, unlike the other operations
      if (typeof callback === 'function') {
        callback(new MongoError('server is closed'));
      }

      return;
    }

    this.s.pool.withConnection((err, conn, cb) => {
      if (err) {
        markServerUnknown(this, err);
        return cb(err);
      }

      conn.killCursors(ns, cursorState, makeOperationHandler(this, conn, null, undefined, cb));
    }, callback);
  }

  /**
   * Insert one or more documents
   * @method
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {array} ops An array of documents to insert
   * @param {boolean} [options.ordered=true] Execute in order or out of order
   * @param {object} [options.writeConcern={}] Write concern for the operation
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {ClientSession} [options.session] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  insert(ns, ops, options, callback) {
    executeWriteOperation({ server: this, op: 'insert', ns, ops }, options, callback);
  }

  /**
   * Perform one or more update operations
   * @method
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {array} ops An array of updates
   * @param {boolean} [options.ordered=true] Execute in order or out of order
   * @param {object} [options.writeConcern={}] Write concern for the operation
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {ClientSession} [options.session] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  update(ns, ops, options, callback) {
    executeWriteOperation({ server: this, op: 'update', ns, ops }, options, callback);
  }

  /**
   * Perform one or more remove operations
   * @method
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {array} ops An array of removes
   * @param {boolean} [options.ordered=true] Execute in order or out of order
   * @param {object} [options.writeConcern={}] Write concern for the operation
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {ClientSession} [options.session] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  remove(ns, ops, options, callback) {
    executeWriteOperation({ server: this, op: 'remove', ns, ops }, options, callback);
  }
}
|
||||
|
||||
// `clusterTime` is delegated to the owning topology so every server in the
// topology observes and advances the same shared $clusterTime.
Object.defineProperty(Server.prototype, 'clusterTime', {
  get: function() {
    return this.s.topology.clusterTime;
  },
  set: function(clusterTime) {
    this.s.topology.clusterTime = clusterTime;
  }
});
|
||||
|
||||
/**
 * Determines whether a server supports retryable writes: wire version >= 6,
 * sessions advertised (`logicalSessionTimeoutMinutes` truthy), and not a
 * standalone server.
 *
 * Fixed to always return a proper boolean — the original returned the raw
 * `&&` chain, which could leak `undefined`/`null` (e.g. when
 * `logicalSessionTimeoutMinutes` is absent). Call sites only use the result
 * in boolean context, so this is backward compatible.
 *
 * @param {Server} server The server to check
 * @return {boolean} Whether the server supports retryable writes
 */
function supportsRetryableWrites(server) {
  const description = server.description;
  return Boolean(
    description.maxWireVersion >= 6 &&
      description.logicalSessionTimeoutMinutes &&
      description.type !== ServerType.Standalone
  );
}
|
||||
|
||||
/**
 * Exponentially-weighted moving average of round trip time.
 *
 * @param {number} oldRtt The previous average (-1 when no sample exists yet)
 * @param {number} duration The newest measured duration
 * @return {number} The updated average
 */
function calculateRoundTripTime(oldRtt, duration) {
  // First sample: nothing to smooth against, take it verbatim.
  if (oldRtt === -1) {
    return duration;
  }

  // Weight of 0.2 on the new sample, 0.8 on accumulated history.
  const weight = 0.2;
  return weight * duration + (1 - weight) * oldRtt;
}
|
||||
|
||||
/**
 * Validates options common to read paths.
 *
 * @param {Server} server The server (currently unused by the checks)
 * @param {object} options The operation options
 * @return {MongoError|undefined} An error when `options.readPreference` is
 *   present but not a ReadPreference instance; otherwise undefined
 */
function basicReadValidations(server, options) {
  const readPreference = options.readPreference;
  if (readPreference && !(readPreference instanceof ReadPreference)) {
    return new MongoError('readPreference must be an instance of ReadPreference');
  }
}
|
||||
|
||||
// Shared implementation behind the Server write methods (`update`/`remove`/etc.):
// validates server state and option support, then runs the named wire-protocol
// operation on a connection checked out from the server's pool.
//
// @param {object} args
// @param {Server} args.server The server to write to
// @param {string} args.op The connection method name to invoke (e.g. 'update')
// @param {string} args.ns The fully qualified namespace (ex: db1.collection1)
// @param {array|object} args.ops The write document(s); a single doc is wrapped in an array
// @param {object} [options] Optional settings (writeConcern, session, ...)
// @param {opResultCallback} callback A callback function
function executeWriteOperation(args, options, callback) {
  if (typeof options === 'function') (callback = options), (options = {});
  options = options || {};

  // TODO: once we drop Node 4, use destructuring either here or in arguments.
  const server = args.server;
  const op = args.op;
  const ns = args.ns;
  const ops = Array.isArray(args.ops) ? args.ops : [args.ops];

  // Fail fast when the server has been (or is being) shut down.
  if (server.s.state === STATE_CLOSING || server.s.state === STATE_CLOSED) {
    callback(new MongoError('server is closed'));
    return;
  }

  if (collationNotSupported(server, options)) {
    callback(new MongoError(`server ${server.name} does not support collation`));
    return;
  }
  // `hint` is rejected for unacknowledged (w: 0) writes and on servers with
  // wire version < 5 (pre-3.4), which cannot report hint errors back.
  const unacknowledgedWrite = options.writeConcern && options.writeConcern.w === 0;
  if (unacknowledgedWrite || maxWireVersion(server) < 5) {
    if ((op === 'update' || op === 'remove') && ops.find(o => o.hint)) {
      callback(new MongoError(`servers < 3.4 do not support hint on ${op}`));
      return;
    }
  }

  server.s.pool.withConnection((err, conn, cb) => {
    if (err) {
      // Failing to check out a connection transitions the server to Unknown.
      markServerUnknown(server, err);
      return cb(err);
    }

    conn[op](ns, ops, options, makeOperationHandler(server, conn, ops, options, cb));
  }, callback);
}
|
||||
|
||||
/**
 * Transitions a server to the Unknown state by emitting a fresh, typeless
 * ServerDescription carrying the triggering error.
 *
 * @param {Server} server The server to mark Unknown
 * @param {Error} [error] The error that caused the transition
 */
function markServerUnknown(server, error) {
  // Non-timeout network errors also reset the server monitor.
  const isNonTimeoutNetworkError =
    error instanceof MongoNetworkError && !(error instanceof MongoNetworkTimeoutError);
  if (isNonTimeoutNetworkError) {
    server[kMonitor].reset();
  }

  // Prefer the topologyVersion reported on the error, falling back to the
  // server's current known topologyVersion.
  const topologyVersion =
    error && error.topologyVersion ? error.topologyVersion : server.description.topologyVersion;

  const unknownDescription = new ServerDescription(server.description.address, null, {
    error,
    topologyVersion
  });
  server.emit('descriptionReceived', unknownDescription);
}
|
||||
|
||||
// A connection is stale when it predates the pool's current generation
// (i.e. the pool has been cleared since the connection was created).
function connectionIsStale(pool, connection) {
  return pool.generation !== connection.generation;
}
|
||||
|
||||
// Only react to a state-change error when its topologyVersion is strictly
// newer than the server's currently known topologyVersion.
function shouldHandleStateChangeError(server, err) {
  return compareTopologyVersion(server.description.topologyVersion, err.topologyVersion) < 0;
}
|
||||
|
||||
// True when a session is in an active transaction and `cmd` is not itself a
// transaction control command. Falsy inputs pass through unchanged to keep
// the exact short-circuit semantics of the original `&&` chain.
function inActiveTransaction(session, cmd) {
  if (!session) return session;
  const transacting = session.inTransaction();
  if (!transacting) return transacting;
  return !isTransactionCommand(cmd);
}
|
||||
|
||||
// Wraps an operation callback with SDAM error handling: labels retryable
// write errors, marks the server Unknown and/or clears the pool where the
// spec requires, then always forwards (err, result) to the user callback.
//
// @param {Server} server The server the operation ran against
// @param {Connection} connection The connection the operation used
// @param {array} cmd The ops/command sent (used for transaction detection)
// @param {object} options Operation options; may carry `session`
// @param {opResultCallback} callback The caller's callback
function makeOperationHandler(server, connection, cmd, options, callback) {
  const session = options && options.session;

  return function handleOperationResult(err, result) {
    // Errors on stale connections (from a previous pool generation) have
    // already been handled when the pool was cleared — ignore them here.
    if (err && !connectionIsStale(server.s.pool, connection)) {
      if (err instanceof MongoNetworkError) {
        if (session && !session.hasEnded) {
          // The server session may have been left dirty by the dropped link.
          session.serverSession.isDirty = true;
        }

        if (supportsRetryableWrites(server) && !inActiveTransaction(session, cmd)) {
          err.addErrorLabel('RetryableWriteError');
        }

        // Network errors make the server Unknown and invalidate its pool,
        // except timeouts that occurred after the handshake completed.
        if (!(err instanceof MongoNetworkTimeoutError) || isNetworkErrorBeforeHandshake(err)) {
          markServerUnknown(server, err);
          server.s.pool.clear();
        }
      } else {
        // if pre-4.4 server, then add error label if its a retryable write error
        if (
          maxWireVersion(server) < 9 &&
          isRetryableWriteError(err) &&
          !inActiveTransaction(session, cmd)
        ) {
          err.addErrorLabel('RetryableWriteError');
        }

        if (isSDAMUnrecoverableError(err)) {
          if (shouldHandleStateChangeError(server, err)) {
            // Pre-4.2 servers (wire <= 7) and "node is shutting down" errors
            // additionally require clearing the connection pool.
            if (maxWireVersion(server) <= 7 || isNodeShuttingDownError(err)) {
              server.s.pool.clear();
            }

            markServerUnknown(server, err);
            // Schedule an immediate re-check of the server after this tick.
            process.nextTick(() => server.requestCheck());
          }
        }
      }
    }

    callback(err, result);
  };
}
|
||||
|
||||
// Only the Server class is part of this module's public surface.
module.exports = {
  Server
};
|
||||
227
node_modules/mongodb/lib/core/sdam/server_description.js
generated
vendored
Normal file
227
node_modules/mongodb/lib/core/sdam/server_description.js
generated
vendored
Normal file
@@ -0,0 +1,227 @@
|
||||
'use strict';
|
||||
|
||||
const arrayStrictEqual = require('../utils').arrayStrictEqual;
|
||||
const tagsStrictEqual = require('../utils').tagsStrictEqual;
|
||||
const errorStrictEqual = require('../utils').errorStrictEqual;
|
||||
const ServerType = require('./common').ServerType;
|
||||
const now = require('../../utils').now;
|
||||
|
||||
// Server types that can accept writes.
const WRITABLE_SERVER_TYPES = new Set([
  ServerType.RSPrimary,
  ServerType.Standalone,
  ServerType.Mongos
]);

// Server types that hold data (used by `isDataBearing`).
const DATA_BEARING_SERVER_TYPES = new Set([
  ServerType.RSPrimary,
  ServerType.RSSecondary,
  ServerType.Mongos,
  ServerType.Standalone
]);

// Fields copied verbatim from an ismaster response onto a ServerDescription
// (see the "direct mappings" loop in the constructor).
const ISMASTER_FIELDS = [
  'minWireVersion',
  'maxWireVersion',
  'maxBsonObjectSize',
  'maxMessageSizeBytes',
  'maxWriteBatchSize',
  'compression',
  'me',
  'hosts',
  'passives',
  'arbiters',
  'tags',
  'setName',
  'setVersion',
  'electionId',
  'primary',
  'logicalSessionTimeoutMinutes',
  'saslSupportedMechs',
  '__nodejs_mock_server__',
  '$clusterTime'
];
|
||||
|
||||
/**
 * The client's view of a single server, based on the most recent ismaster outcome.
 *
 * Internal type, not meant to be directly instantiated
 */
class ServerDescription {
  /**
   * Create a ServerDescription
   * @param {String} address The address of the server
   * @param {Object} [ismaster] An optional ismaster response for this server
   * @param {Object} [options] Optional settings
   * @param {Number} [options.roundTripTime] The round trip time to ping this server (in ms)
   * @param {Error} [options.error] An Error used for better reporting debugging
   * @param {any} [options.topologyVersion] The topologyVersion
   */
  constructor(address, ismaster, options) {
    options = options || {};
    // Backfill defaults so the mapping/normalization steps below always see
    // wire versions and host arrays.
    ismaster = Object.assign(
      {
        minWireVersion: 0,
        maxWireVersion: 0,
        hosts: [],
        passives: [],
        arbiters: [],
        tags: []
      },
      ismaster
    );

    this.address = address;
    this.error = options.error;
    this.roundTripTime = options.roundTripTime || -1; // -1 means "not yet measured"
    this.lastUpdateTime = now();
    this.lastWriteDate = ismaster.lastWrite ? ismaster.lastWrite.lastWriteDate : null;
    this.opTime = ismaster.lastWrite ? ismaster.lastWrite.opTime : null;
    this.type = parseServerType(ismaster);
    this.topologyVersion = options.topologyVersion || ismaster.topologyVersion;

    // direct mappings
    ISMASTER_FIELDS.forEach(field => {
      if (typeof ismaster[field] !== 'undefined') this[field] = ismaster[field];
    });

    // normalize case for hosts
    if (this.me) this.me = this.me.toLowerCase();
    this.hosts = this.hosts.map(host => host.toLowerCase());
    this.passives = this.passives.map(host => host.toLowerCase());
    this.arbiters = this.arbiters.map(host => host.toLowerCase());
  }

  // Every host this server knows about: members, arbiters, and passives.
  get allHosts() {
    return this.hosts.concat(this.arbiters).concat(this.passives);
  }

  /**
   * @return {Boolean} Is this server available for reads
   */
  get isReadable() {
    return this.type === ServerType.RSSecondary || this.isWritable;
  }

  /**
   * @return {Boolean} Is this server data bearing
   */
  get isDataBearing() {
    return DATA_BEARING_SERVER_TYPES.has(this.type);
  }

  /**
   * @return {Boolean} Is this server available for writes
   */
  get isWritable() {
    return WRITABLE_SERVER_TYPES.has(this.type);
  }

  // The host portion of `address` (everything before the trailing ":port").
  get host() {
    const chopLength = `:${this.port}`.length;
    return this.address.slice(0, -chopLength);
  }

  // The numeric port parsed from `address`.
  get port() {
    const port = this.address.split(':').pop();
    return port ? Number.parseInt(port, 10) : port;
  }

  /**
   * Determines if another `ServerDescription` is equal to this one per the rules defined
   * in the {@link https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#serverdescription|SDAM spec}
   *
   * @param {ServerDescription} other
   * @return {Boolean}
   */
  equals(other) {
    // Guard before dereferencing `other`. The previous implementation computed
    // `topologyVersionsEqual` from `other.topologyVersion` AHEAD of its
    // `other != null` check, so `equals(null)` threw instead of returning false.
    if (other == null) return false;

    const topologyVersionsEqual =
      this.topologyVersion === other.topologyVersion ||
      compareTopologyVersion(this.topologyVersion, other.topologyVersion) === 0;

    return (
      errorStrictEqual(this.error, other.error) &&
      this.type === other.type &&
      this.minWireVersion === other.minWireVersion &&
      this.me === other.me &&
      arrayStrictEqual(this.hosts, other.hosts) &&
      tagsStrictEqual(this.tags, other.tags) &&
      this.setName === other.setName &&
      this.setVersion === other.setVersion &&
      (this.electionId
        ? other.electionId && this.electionId.equals(other.electionId)
        : this.electionId === other.electionId) &&
      this.primary === other.primary &&
      this.logicalSessionTimeoutMinutes === other.logicalSessionTimeoutMinutes &&
      topologyVersionsEqual
    );
  }
}
|
||||
|
||||
/**
 * Parses an `ismaster` message and determines the server type
 *
 * @param {Object} ismaster The `ismaster` message to parse
 * @return {ServerType}
 */
function parseServerType(ismaster) {
  // No response, or a failed one: we know nothing about this server.
  if (!ismaster || !ismaster.ok) return ServerType.Unknown;

  // Uninitialized replica-set member.
  if (ismaster.isreplicaset) return ServerType.RSGhost;

  // mongos identifies itself with msg === 'isdbgrid'.
  if (ismaster.msg === 'isdbgrid') return ServerType.Mongos;

  // A setName means replica-set membership; pick the most specific role.
  if (ismaster.setName) {
    if (ismaster.hidden) return ServerType.RSOther;
    if (ismaster.ismaster) return ServerType.RSPrimary;
    if (ismaster.secondary) return ServerType.RSSecondary;
    if (ismaster.arbiterOnly) return ServerType.RSArbiter;
    return ServerType.RSOther;
  }

  return ServerType.Standalone;
}
|
||||
|
||||
/**
 * Compares two topology versions.
 *
 * @param {object} lhs
 * @param {object} rhs
 * @returns A negative number if `lhs` is older than `rhs`; positive if `lhs` is newer than `rhs`; 0 if they are equivalent.
 */
function compareTopologyVersion(lhs, rhs) {
  // A missing version on either side is treated as "older".
  if (lhs == null || rhs == null) return -1;

  // Versions from different processes are incomparable; treat lhs as older.
  if (!lhs.processId.equals(rhs.processId)) return -1;

  // TODO: handle counters as Longs
  if (lhs.counter === rhs.counter) return 0;
  if (lhs.counter < rhs.counter) return -1;
  return 1;
}
|
||||
|
||||
// Public surface of this module: the description type plus the two pure
// helpers used by the SDAM layer.
module.exports = {
  ServerDescription,
  parseServerType,
  compareTopologyVersion
};
|
||||
238
node_modules/mongodb/lib/core/sdam/server_selection.js
generated
vendored
Normal file
238
node_modules/mongodb/lib/core/sdam/server_selection.js
generated
vendored
Normal file
@@ -0,0 +1,238 @@
|
||||
'use strict';
|
||||
const ServerType = require('./common').ServerType;
|
||||
const TopologyType = require('./common').TopologyType;
|
||||
const ReadPreference = require('../topologies/read_preference');
|
||||
const MongoError = require('../error').MongoError;
|
||||
|
||||
// max staleness constants
// Milliseconds added to the variance window per the max-staleness spec
// (used when validating maxStalenessSeconds against heartbeatFrequencyMS).
const IDLE_WRITE_PERIOD = 10000;
// Smallest maxStalenessSeconds value a caller may request.
const SMALLEST_MAX_STALENESS_SECONDS = 90;
|
||||
|
||||
/**
 * Returns a server selector that selects for writable servers
 */
function writableServerSelector() {
  return function(topologyDescription, servers) {
    // Keep only writable servers, then narrow to the latency window.
    const writableServers = servers.filter(s => s.isWritable);
    return latencyWindowReducer(topologyDescription, writableServers);
  };
}
|
||||
|
||||
/**
 * Reduces the passed in array of servers by the rules of the "Max Staleness" specification
 * found here: https://github.com/mongodb/specifications/blob/master/source/max-staleness/max-staleness.rst
 *
 * @param {ReadPreference} readPreference The read preference providing max staleness guidance
 * @param {topologyDescription} topologyDescription The topology description
 * @param {ServerDescription[]} servers The list of server descriptions to be reduced
 * @return {ServerDescription[]} The list of servers that satisfy the requirements of max staleness
 * @throws {MongoError} When maxStalenessSeconds is below the allowed minimums
 */
function maxStalenessReducer(readPreference, topologyDescription, servers) {
  // No (or negative) max staleness: nothing to filter.
  if (readPreference.maxStalenessSeconds == null || readPreference.maxStalenessSeconds < 0) {
    return servers;
  }

  const maxStaleness = readPreference.maxStalenessSeconds;
  // The requested staleness cannot be tighter than one heartbeat plus the
  // idle write period (converted ms -> seconds).
  const maxStalenessVariance =
    (topologyDescription.heartbeatFrequencyMS + IDLE_WRITE_PERIOD) / 1000;
  if (maxStaleness < maxStalenessVariance) {
    throw new MongoError(`maxStalenessSeconds must be at least ${maxStalenessVariance} seconds`);
  }

  if (maxStaleness < SMALLEST_MAX_STALENESS_SECONDS) {
    throw new MongoError(
      `maxStalenessSeconds must be at least ${SMALLEST_MAX_STALENESS_SECONDS} seconds`
    );
  }

  if (topologyDescription.type === TopologyType.ReplicaSetWithPrimary) {
    const primary = Array.from(topologyDescription.servers.values()).filter(primaryFilter)[0];
    return servers.reduce((result, server) => {
      // Staleness is measured relative to the primary's last observed write.
      const stalenessMS =
        server.lastUpdateTime -
        server.lastWriteDate -
        (primary.lastUpdateTime - primary.lastWriteDate) +
        topologyDescription.heartbeatFrequencyMS;

      const staleness = stalenessMS / 1000;
      if (staleness <= readPreference.maxStalenessSeconds) result.push(server);
      return result;
    }, []);
  }

  if (topologyDescription.type === TopologyType.ReplicaSetNoPrimary) {
    if (servers.length === 0) {
      return servers;
    }

    // With no primary, measure against the member with the newest write (sMax).
    const sMax = servers.reduce((max, s) => (s.lastWriteDate > max.lastWriteDate ? s : max));
    return servers.reduce((result, server) => {
      const stalenessMS =
        sMax.lastWriteDate - server.lastWriteDate + topologyDescription.heartbeatFrequencyMS;

      const staleness = stalenessMS / 1000;
      if (staleness <= readPreference.maxStalenessSeconds) result.push(server);
      return result;
    }, []);
  }

  // Other topology types are unaffected by max staleness.
  return servers;
}
|
||||
|
||||
/**
 * Determines whether a server's tags match a given set of tags
 *
 * @param {String[]} tagSet The requested tag set to match
 * @param {String[]} serverTags The server's tags
 * @return {Boolean} True when every requested tag is present with an identical value
 */
function tagSetMatch(tagSet, serverTags) {
  const serverTagKeys = Object.keys(serverTags);
  return Object.keys(tagSet).every(
    key => serverTagKeys.indexOf(key) !== -1 && serverTags[key] === tagSet[key]
  );
}
|
||||
|
||||
/**
 * Reduces a set of server descriptions based on tags requested by the read preference
 *
 * @param {ReadPreference} readPreference The read preference providing the requested tags
 * @param {ServerDescription[]} servers The list of server descriptions to reduce
 * @return {ServerDescription[]} The list of servers matching the requested tags
 */
function tagSetReducer(readPreference, servers) {
  const tags = readPreference.tags;

  // No tag requirements: everything is eligible.
  if (tags == null || (Array.isArray(tags) && tags.length === 0)) {
    return servers;
  }

  // Tag sets are tried in order; the first one with any matches wins.
  for (const tagSet of tags) {
    const matched = servers.filter(server => tagSetMatch(tagSet, server.tags));
    if (matched.length) {
      return matched;
    }
  }

  return [];
}
|
||||
|
||||
/**
 * Reduces a list of servers to ensure they fall within an acceptable latency window. This is
 * further specified in the "Server Selection" specification, found here:
 * https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst
 *
 * @param {topologyDescription} topologyDescription The topology description
 * @param {ServerDescription[]} servers The list of servers to reduce
 * @returns {ServerDescription[]} The servers which fall within an acceptable latency window
 */
function latencyWindowReducer(topologyDescription, servers) {
  // Find the smallest round trip time (-1 sentinel until the first server).
  let low = -1;
  for (const server of servers) {
    low = low === -1 ? server.roundTripTime : Math.min(server.roundTripTime, low);
  }

  // The window extends localThresholdMS beyond the fastest server.
  const high = low + topologyDescription.localThresholdMS;
  return servers.filter(server => server.roundTripTime >= low && server.roundTripTime <= high);
}
|
||||
|
||||
// filters
// Predicates used with Array#filter during read-preference server selection.

// Matches only the replica-set primary.
function primaryFilter(server) {
  return server.type === ServerType.RSPrimary;
}

// Matches only replica-set secondaries.
function secondaryFilter(server) {
  return server.type === ServerType.RSSecondary;
}

// Matches any readable replica-set member (primary or secondary).
function nearestFilter(server) {
  return server.type === ServerType.RSSecondary || server.type === ServerType.RSPrimary;
}

// Matches any server whose type has been determined.
function knownFilter(server) {
  return server.type !== ServerType.Unknown;
}
|
||||
|
||||
/**
 * Returns a function which selects servers based on a provided read preference
 *
 * @param {ReadPreference} readPreference The read preference to select with
 * @throws {TypeError} When the read preference is invalid
 */
function readPreferenceServerSelector(readPreference) {
  if (!readPreference.isValid()) {
    throw new TypeError('Invalid read preference specified');
  }

  return function(topologyDescription, servers) {
    // Enforce any minimum wire version demanded by the read preference.
    const commonWireVersion = topologyDescription.commonWireVersion;
    if (
      commonWireVersion &&
      readPreference.minWireVersion &&
      readPreference.minWireVersion > commonWireVersion
    ) {
      throw new MongoError(
        `Minimum wire version '${readPreference.minWireVersion}' required, but found '${commonWireVersion}'`
      );
    }

    // Nothing is selectable until the topology type is known.
    if (topologyDescription.type === TopologyType.Unknown) {
      return [];
    }

    // Single/Sharded topologies ignore the mode: any known server within the
    // latency window is eligible.
    if (
      topologyDescription.type === TopologyType.Single ||
      topologyDescription.type === TopologyType.Sharded
    ) {
      return latencyWindowReducer(topologyDescription, servers.filter(knownFilter));
    }

    const mode = readPreference.mode;
    if (mode === ReadPreference.PRIMARY) {
      return servers.filter(primaryFilter);
    }

    if (mode === ReadPreference.PRIMARY_PREFERRED) {
      const result = servers.filter(primaryFilter);
      if (result.length) {
        return result;
      }
      // No primary available: fall through to secondary-style selection below.
    }

    // NEAREST considers primaries and secondaries; other modes only secondaries.
    const filter = mode === ReadPreference.NEAREST ? nearestFilter : secondaryFilter;
    const selectedServers = latencyWindowReducer(
      topologyDescription,
      tagSetReducer(
        readPreference,
        maxStalenessReducer(readPreference, topologyDescription, servers.filter(filter))
      )
    );

    // SECONDARY_PREFERRED falls back to the primary when no secondary qualifies.
    if (mode === ReadPreference.SECONDARY_PREFERRED && selectedServers.length === 0) {
      return servers.filter(primaryFilter);
    }

    return selectedServers;
  };
}
|
||||
|
||||
// Public selectors; the reducers and filters above are internal helpers.
module.exports = {
  writableServerSelector,
  readPreferenceServerSelector
};
|
||||
135
node_modules/mongodb/lib/core/sdam/srv_polling.js
generated
vendored
Normal file
135
node_modules/mongodb/lib/core/sdam/srv_polling.js
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
'use strict';
|
||||
|
||||
const Logger = require('../connection/logger');
|
||||
const EventEmitter = require('events').EventEmitter;
|
||||
const dns = require('dns');
|
||||
/**
 * Determines whether a provided address matches the provided parent domain in order
 * to avoid certain attack vectors.
 *
 * @param {String} srvAddress The address to check against a domain
 * @param {String} parentDomain The domain to check the provided address against
 * @return {Boolean} Whether the provided address matches the parent domain
 */
function matchesParentDomain(srvAddress, parentDomain) {
  // Strip the first (host) label from each name, keeping a leading dot so
  // that suffix comparison cannot match partial labels.
  const firstLabel = /^.*?\./;
  const srvSuffix = `.${srvAddress.replace(firstLabel, '')}`;
  const parentSuffix = `.${parentDomain.replace(firstLabel, '')}`;
  return srvSuffix.endsWith(parentSuffix);
}
|
||||
|
||||
// Event payload emitted on 'srvRecordDiscovery', carrying the raw SRV records.
class SrvPollingEvent {
  constructor(srvRecords) {
    this.srvRecords = srvRecords;
  }

  // The unique "host:port" addresses contained in the records.
  addresses() {
    const formatted = this.srvRecords.map(({ name, port }) => `${name}:${port}`);
    return new Set(formatted);
  }
}
|
||||
|
||||
// Periodically re-resolves the SRV record for a "mongodb+srv" host and emits
// 'srvRecordDiscovery' with the validated records. After a failed poll it
// switches to the (shorter) heartbeat interval until the next success.
class SrvPoller extends EventEmitter {
  /**
   * @param {object} options
   * @param {string} options.srvHost
   * @param {number} [options.heartbeatFrequencyMS]
   * @param {function} [options.logger]
   * @param {string} [options.loggerLevel]
   */
  constructor(options) {
    super();

    if (!options || !options.srvHost) {
      throw new TypeError('options for SrvPoller must exist and include srvHost');
    }

    this.srvHost = options.srvHost;
    // Normal rescan cadence (ms) while polling is healthy.
    this.rescanSrvIntervalMS = 60000;
    // Faster cadence (ms) used while in haMode after a failure.
    this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 10000;
    this.logger = Logger('srvPoller', options);

    // haMode: true after a failed poll until the next success.
    this.haMode = false;
    // generation: bumped by stop() to invalidate in-flight DNS callbacks.
    this.generation = 0;

    // Handle of the pending setTimeout, or null when not scheduled.
    this._timeout = null;
  }

  // The SRV record name that is resolved for this host.
  get srvAddress() {
    return `_mongodb._tcp.${this.srvHost}`;
  }

  // Current poll interval, depending on haMode.
  get intervalMS() {
    return this.haMode ? this.heartbeatFrequencyMS : this.rescanSrvIntervalMS;
  }

  // Begin polling; a no-op if a poll is already scheduled.
  start() {
    if (!this._timeout) {
      this.schedule();
    }
  }

  // Stop polling and invalidate any DNS lookup already in flight.
  stop() {
    if (this._timeout) {
      clearTimeout(this._timeout);
      this.generation += 1;
      this._timeout = null;
    }
  }

  // (Re)arm the poll timer to fire after `intervalMS`.
  schedule() {
    clearTimeout(this._timeout);
    this._timeout = setTimeout(() => this._poll(), this.intervalMS);
  }

  // Successful poll: leave haMode, reschedule, and notify listeners.
  success(srvRecords) {
    this.haMode = false;
    this.schedule();
    this.emit('srvRecordDiscovery', new SrvPollingEvent(srvRecords));
  }

  // Failed poll: log, enter haMode (faster retries), and reschedule.
  failure(message, obj) {
    this.logger.warn(message, obj);
    this.haMode = true;
    this.schedule();
  }

  // Log an SRV record whose target falls outside the parent domain.
  parentDomainMismatch(srvRecord) {
    this.logger.warn(
      `parent domain mismatch on SRV record (${srvRecord.name}:${srvRecord.port})`,
      srvRecord
    );
  }

  // Resolve the SRV record, filter out mismatched domains, and report.
  _poll() {
    const generation = this.generation;
    dns.resolveSrv(this.srvAddress, (err, srvRecords) => {
      // stop() was called while the lookup was in flight: result is stale.
      if (generation !== this.generation) {
        return;
      }

      if (err) {
        this.failure('DNS error', err);
        return;
      }

      const finalAddresses = [];
      srvRecords.forEach(record => {
        if (matchesParentDomain(record.name, this.srvHost)) {
          finalAddresses.push(record);
        } else {
          this.parentDomainMismatch(record);
        }
      });

      if (!finalAddresses.length) {
        this.failure('No valid addresses found at host');
        return;
      }

      this.success(finalAddresses);
    });
  }
}
|
||||
|
||||
// Both the poller and its event type are public to the driver core.
module.exports.SrvPollingEvent = SrvPollingEvent;
module.exports.SrvPoller = SrvPoller;
|
||||
1142
node_modules/mongodb/lib/core/sdam/topology.js
generated
vendored
Normal file
1142
node_modules/mongodb/lib/core/sdam/topology.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
441
node_modules/mongodb/lib/core/sdam/topology_description.js
generated
vendored
Normal file
441
node_modules/mongodb/lib/core/sdam/topology_description.js
generated
vendored
Normal file
@@ -0,0 +1,441 @@
|
||||
'use strict';
|
||||
const ServerType = require('./common').ServerType;
|
||||
const ServerDescription = require('./server_description').ServerDescription;
|
||||
const WIRE_CONSTANTS = require('../wireprotocol/constants');
|
||||
const TopologyType = require('./common').TopologyType;
|
||||
|
||||
// contstants related to compatability checks
|
||||
const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION;
|
||||
const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION;
|
||||
const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION;
|
||||
const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION;
|
||||
|
||||
// Representation of a deployment of servers
|
||||
class TopologyDescription {
|
||||
/**
|
||||
* Create a TopologyDescription
|
||||
*
|
||||
* @param {string} topologyType
|
||||
* @param {Map<string, ServerDescription>} serverDescriptions the a map of address to ServerDescription
|
||||
* @param {string} setName
|
||||
* @param {number} maxSetVersion
|
||||
* @param {ObjectId} maxElectionId
|
||||
*/
|
||||
constructor(
|
||||
topologyType,
|
||||
serverDescriptions,
|
||||
setName,
|
||||
maxSetVersion,
|
||||
maxElectionId,
|
||||
commonWireVersion,
|
||||
options
|
||||
) {
|
||||
options = options || {};
|
||||
|
||||
// TODO: consider assigning all these values to a temporary value `s` which
|
||||
// we use `Object.freeze` on, ensuring the internal state of this type
|
||||
// is immutable.
|
||||
this.type = topologyType || TopologyType.Unknown;
|
||||
this.setName = setName || null;
|
||||
this.maxSetVersion = maxSetVersion || null;
|
||||
this.maxElectionId = maxElectionId || null;
|
||||
this.servers = serverDescriptions || new Map();
|
||||
this.stale = false;
|
||||
this.compatible = true;
|
||||
this.compatibilityError = null;
|
||||
this.logicalSessionTimeoutMinutes = null;
|
||||
this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 0;
|
||||
this.localThresholdMS = options.localThresholdMS || 0;
|
||||
this.commonWireVersion = commonWireVersion || null;
|
||||
|
||||
// save this locally, but don't display when printing the instance out
|
||||
Object.defineProperty(this, 'options', { value: options, enumberable: false });
|
||||
|
||||
// determine server compatibility
|
||||
for (const serverDescription of this.servers.values()) {
|
||||
if (serverDescription.type === ServerType.Unknown) continue;
|
||||
|
||||
if (serverDescription.minWireVersion > MAX_SUPPORTED_WIRE_VERSION) {
|
||||
this.compatible = false;
|
||||
this.compatibilityError = `Server at ${serverDescription.address} requires wire version ${serverDescription.minWireVersion}, but this version of the driver only supports up to ${MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${MAX_SUPPORTED_SERVER_VERSION})`;
|
||||
}
|
||||
|
||||
if (serverDescription.maxWireVersion < MIN_SUPPORTED_WIRE_VERSION) {
|
||||
this.compatible = false;
|
||||
this.compatibilityError = `Server at ${serverDescription.address} reports wire version ${serverDescription.maxWireVersion}, but this version of the driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION}).`;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Whenever a client updates the TopologyDescription from an ismaster response, it MUST set
|
||||
// TopologyDescription.logicalSessionTimeoutMinutes to the smallest logicalSessionTimeoutMinutes
|
||||
// value among ServerDescriptions of all data-bearing server types. If any have a null
|
||||
// logicalSessionTimeoutMinutes, then TopologyDescription.logicalSessionTimeoutMinutes MUST be
|
||||
// set to null.
|
||||
const readableServers = Array.from(this.servers.values()).filter(s => s.isReadable);
|
||||
this.logicalSessionTimeoutMinutes = readableServers.reduce((result, server) => {
|
||||
if (server.logicalSessionTimeoutMinutes == null) return null;
|
||||
if (result == null) return server.logicalSessionTimeoutMinutes;
|
||||
return Math.min(result, server.logicalSessionTimeoutMinutes);
|
||||
}, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new TopologyDescription based on the SrvPollingEvent
|
||||
* @param {SrvPollingEvent} ev The event
|
||||
*/
|
||||
updateFromSrvPollingEvent(ev) {
|
||||
const newAddresses = ev.addresses();
|
||||
const serverDescriptions = new Map(this.servers);
|
||||
for (const server of this.servers) {
|
||||
if (newAddresses.has(server[0])) {
|
||||
newAddresses.delete(server[0]);
|
||||
} else {
|
||||
serverDescriptions.delete(server[0]);
|
||||
}
|
||||
}
|
||||
|
||||
if (serverDescriptions.size === this.servers.size && newAddresses.size === 0) {
|
||||
return this;
|
||||
}
|
||||
|
||||
for (const address of newAddresses) {
|
||||
serverDescriptions.set(address, new ServerDescription(address));
|
||||
}
|
||||
|
||||
return new TopologyDescription(
|
||||
this.type,
|
||||
serverDescriptions,
|
||||
this.setName,
|
||||
this.maxSetVersion,
|
||||
this.maxElectionId,
|
||||
this.commonWireVersion,
|
||||
this.options,
|
||||
null
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a copy of this description updated with a given ServerDescription
|
||||
*
|
||||
* @param {ServerDescription} serverDescription
|
||||
*/
|
||||
update(serverDescription) {
|
||||
const address = serverDescription.address;
|
||||
// NOTE: there are a number of prime targets for refactoring here
|
||||
// once we support destructuring assignments
|
||||
|
||||
// potentially mutated values
|
||||
let topologyType = this.type;
|
||||
let setName = this.setName;
|
||||
let maxSetVersion = this.maxSetVersion;
|
||||
let maxElectionId = this.maxElectionId;
|
||||
let commonWireVersion = this.commonWireVersion;
|
||||
|
||||
if (serverDescription.setName && setName && serverDescription.setName !== setName) {
|
||||
serverDescription = new ServerDescription(address, null);
|
||||
}
|
||||
|
||||
const serverType = serverDescription.type;
|
||||
let serverDescriptions = new Map(this.servers);
|
||||
|
||||
// update common wire version
|
||||
if (serverDescription.maxWireVersion !== 0) {
|
||||
if (commonWireVersion == null) {
|
||||
commonWireVersion = serverDescription.maxWireVersion;
|
||||
} else {
|
||||
commonWireVersion = Math.min(commonWireVersion, serverDescription.maxWireVersion);
|
||||
}
|
||||
}
|
||||
|
||||
// update the actual server description
|
||||
serverDescriptions.set(address, serverDescription);
|
||||
|
||||
if (topologyType === TopologyType.Single) {
|
||||
// once we are defined as single, that never changes
|
||||
return new TopologyDescription(
|
||||
TopologyType.Single,
|
||||
serverDescriptions,
|
||||
setName,
|
||||
maxSetVersion,
|
||||
maxElectionId,
|
||||
commonWireVersion,
|
||||
this.options
|
||||
);
|
||||
}
|
||||
|
||||
if (topologyType === TopologyType.Unknown) {
|
||||
if (serverType === ServerType.Standalone && this.servers.size !== 1) {
|
||||
serverDescriptions.delete(address);
|
||||
} else {
|
||||
topologyType = topologyTypeForServerType(serverType);
|
||||
}
|
||||
}
|
||||
|
||||
if (topologyType === TopologyType.Sharded) {
|
||||
if ([ServerType.Mongos, ServerType.Unknown].indexOf(serverType) === -1) {
|
||||
serverDescriptions.delete(address);
|
||||
}
|
||||
}
|
||||
|
||||
if (topologyType === TopologyType.ReplicaSetNoPrimary) {
|
||||
if ([ServerType.Standalone, ServerType.Mongos].indexOf(serverType) >= 0) {
|
||||
serverDescriptions.delete(address);
|
||||
}
|
||||
|
||||
if (serverType === ServerType.RSPrimary) {
|
||||
const result = updateRsFromPrimary(
|
||||
serverDescriptions,
|
||||
setName,
|
||||
serverDescription,
|
||||
maxSetVersion,
|
||||
maxElectionId
|
||||
);
|
||||
|
||||
(topologyType = result[0]),
|
||||
(setName = result[1]),
|
||||
(maxSetVersion = result[2]),
|
||||
(maxElectionId = result[3]);
|
||||
} else if (
|
||||
[ServerType.RSSecondary, ServerType.RSArbiter, ServerType.RSOther].indexOf(serverType) >= 0
|
||||
) {
|
||||
const result = updateRsNoPrimaryFromMember(serverDescriptions, setName, serverDescription);
|
||||
(topologyType = result[0]), (setName = result[1]);
|
||||
}
|
||||
}
|
||||
|
||||
if (topologyType === TopologyType.ReplicaSetWithPrimary) {
|
||||
if ([ServerType.Standalone, ServerType.Mongos].indexOf(serverType) >= 0) {
|
||||
serverDescriptions.delete(address);
|
||||
topologyType = checkHasPrimary(serverDescriptions);
|
||||
} else if (serverType === ServerType.RSPrimary) {
|
||||
const result = updateRsFromPrimary(
|
||||
serverDescriptions,
|
||||
setName,
|
||||
serverDescription,
|
||||
maxSetVersion,
|
||||
maxElectionId
|
||||
);
|
||||
|
||||
(topologyType = result[0]),
|
||||
(setName = result[1]),
|
||||
(maxSetVersion = result[2]),
|
||||
(maxElectionId = result[3]);
|
||||
} else if (
|
||||
[ServerType.RSSecondary, ServerType.RSArbiter, ServerType.RSOther].indexOf(serverType) >= 0
|
||||
) {
|
||||
topologyType = updateRsWithPrimaryFromMember(
|
||||
serverDescriptions,
|
||||
setName,
|
||||
serverDescription
|
||||
);
|
||||
} else {
|
||||
topologyType = checkHasPrimary(serverDescriptions);
|
||||
}
|
||||
}
|
||||
|
||||
return new TopologyDescription(
|
||||
topologyType,
|
||||
serverDescriptions,
|
||||
setName,
|
||||
maxSetVersion,
|
||||
maxElectionId,
|
||||
commonWireVersion,
|
||||
this.options
|
||||
);
|
||||
}
|
||||
|
||||
get error() {
|
||||
const descriptionsWithError = Array.from(this.servers.values()).filter(sd => sd.error);
|
||||
if (descriptionsWithError.length > 0) {
|
||||
return descriptionsWithError[0].error;
|
||||
}
|
||||
}
|
||||
|
||||
  /**
   * Determines if the topology description has any known servers
   */
  get hasKnownServers() {
    // "known" means the monitor has classified the server as something other
    // than ServerType.Unknown
    return Array.from(this.servers.values()).some(sd => sd.type !== ServerType.Unknown);
  }
|
||||
|
||||
/**
|
||||
* Determines if this topology description has a data-bearing server available.
|
||||
*/
|
||||
get hasDataBearingServers() {
|
||||
return Array.from(this.servers.values()).some(sd => sd.isDataBearing);
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if the topology has a definition for the provided address
|
||||
*
|
||||
* @param {String} address
|
||||
* @return {Boolean} Whether the topology knows about this server
|
||||
*/
|
||||
hasServer(address) {
|
||||
return this.servers.has(address);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Derives the topology type implied by a single server's reported type.
 *
 * @param {String} serverType a value from the ServerType enumeration
 * @return {String} the corresponding TopologyType value
 */
function topologyTypeForServerType(serverType) {
  switch (serverType) {
    case ServerType.Standalone:
      return TopologyType.Single;
    case ServerType.Mongos:
      return TopologyType.Sharded;
    case ServerType.RSPrimary:
      return TopologyType.ReplicaSetWithPrimary;
    case ServerType.RSGhost:
    case ServerType.Unknown:
      return TopologyType.Unknown;
    default:
      // RSSecondary / RSArbiter / RSOther / PossiblePrimary all indicate a
      // replica set whose primary has not (yet) been seen
      return TopologyType.ReplicaSetNoPrimary;
  }
}
|
||||
|
||||
/**
 * Comparator for (possibly null) ObjectId-like values.
 *
 * A null/undefined id sorts lowest; two raw Buffer ids are compared bytewise;
 * anything else falls back to comparing string representations.
 *
 * @param {ObjectId} oid1
 * @param {ObjectId} oid2
 * @return {Number} negative, zero, or positive per the usual comparator contract
 */
function compareObjectId(oid1, oid2) {
  // missing ids always sort before present ones
  if (oid1 == null) {
    return -1;
  }

  if (oid2 == null) {
    return 1;
  }

  const lhs = oid1.id;
  const rhs = oid2.id;
  if (lhs instanceof Buffer && rhs instanceof Buffer) {
    return lhs.compare(rhs);
  }

  return oid1.toString().localeCompare(oid2.toString());
}
|
||||
|
||||
/**
 * Folds an RSPrimary's description into the working server map and recomputes
 * the replica-set state.
 *
 * NOTE: mutates `serverDescriptions` in place (stale primaries are reset to
 * Unknown, new hosts are discovered, absent hosts are removed); the scalar
 * inputs are returned in a tuple rather than mutated.
 *
 * @param {Map} serverDescriptions working copy of the address -> ServerDescription map
 * @param {String} setName the set name currently associated with the topology (may be null)
 * @param {ServerDescription} serverDescription the primary's latest description
 * @param {Number} maxSetVersion greatest set version seen so far (may be null)
 * @param {ObjectId} maxElectionId greatest election id seen so far (may be null)
 * @return {Array} tuple [topologyType, setName, maxSetVersion, maxElectionId]
 */
function updateRsFromPrimary(
  serverDescriptions,
  setName,
  serverDescription,
  maxSetVersion,
  maxElectionId
) {
  // adopt the primary's set name if we had none yet
  setName = setName || serverDescription.setName;
  if (setName !== serverDescription.setName) {
    // the "primary" belongs to a different replica set: discard it entirely
    serverDescriptions.delete(serverDescription.address);
    return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
  }

  const electionId = serverDescription.electionId ? serverDescription.electionId : null;
  if (serverDescription.setVersion && electionId) {
    if (maxSetVersion && maxElectionId) {
      if (
        maxSetVersion > serverDescription.setVersion ||
        compareObjectId(maxElectionId, electionId) > 0
      ) {
        // this primary is stale, we must remove it
        serverDescriptions.set(
          serverDescription.address,
          new ServerDescription(serverDescription.address)
        );

        return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
      }
    }

    // not stale: remember this election id as the greatest seen
    maxElectionId = serverDescription.electionId;
  }

  // track the greatest set version observed (independent of election id)
  if (
    serverDescription.setVersion != null &&
    (maxSetVersion == null || serverDescription.setVersion > maxSetVersion)
  ) {
    maxSetVersion = serverDescription.setVersion;
  }

  // We've heard from the primary. Is it the same primary as before?
  for (const address of serverDescriptions.keys()) {
    const server = serverDescriptions.get(address);

    if (server.type === ServerType.RSPrimary && server.address !== serverDescription.address) {
      // Reset old primary's type to Unknown.
      serverDescriptions.set(address, new ServerDescription(server.address));

      // There can only be one primary
      break;
    }
  }

  // Discover new hosts from this primary's response.
  serverDescription.allHosts.forEach(address => {
    if (!serverDescriptions.has(address)) {
      serverDescriptions.set(address, new ServerDescription(address));
    }
  });

  // Remove hosts not in the response.
  const currentAddresses = Array.from(serverDescriptions.keys());
  const responseAddresses = serverDescription.allHosts;
  currentAddresses
    .filter(addr => responseAddresses.indexOf(addr) === -1)
    .forEach(address => {
      serverDescriptions.delete(address);
    });

  return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
}
|
||||
|
||||
/**
 * Folds a non-primary replica-set member's description into the working server
 * map while a primary is currently known.
 *
 * NOTE: may mutate `serverDescriptions` by removing a member that does not
 * belong to this set or that reports a different "me" address.
 *
 * @param {Map} serverDescriptions working copy of the address -> ServerDescription map
 * @param {String} setName the set name associated with the topology (required)
 * @param {ServerDescription} serverDescription the member's latest description
 * @return {String} the recomputed TopologyType
 * @throws {TypeError} when setName is null/undefined
 */
function updateRsWithPrimaryFromMember(serverDescriptions, setName, serverDescription) {
  if (setName == null) {
    throw new TypeError('setName is required');
  }

  const setNameMismatch = setName !== serverDescription.setName;
  const reportsDifferentMe =
    serverDescription.me && serverDescription.address !== serverDescription.me;
  if (setNameMismatch || reportsDifferentMe) {
    serverDescriptions.delete(serverDescription.address);
  }

  // the removal above may have evicted the primary itself
  return checkHasPrimary(serverDescriptions);
}
|
||||
|
||||
/**
 * Folds a replica-set member's description into the working server map while no
 * primary is currently known.
 *
 * NOTE: mutates `serverDescriptions` in place (discovers hosts from the
 * member's host list; removes the member on set-name or "me" mismatch).
 *
 * @param {Map} serverDescriptions working copy of the address -> ServerDescription map
 * @param {String} setName the set name associated with the topology (may be null)
 * @param {ServerDescription} serverDescription the member's latest description
 * @return {Array} tuple [topologyType, setName]
 */
function updateRsNoPrimaryFromMember(serverDescriptions, setName, serverDescription) {
  const topologyType = TopologyType.ReplicaSetNoPrimary;

  // adopt the member's set name if we had none yet
  const effectiveSetName = setName || serverDescription.setName;
  if (effectiveSetName !== serverDescription.setName) {
    // member belongs to a different replica set: discard it
    serverDescriptions.delete(serverDescription.address);
    return [topologyType, effectiveSetName];
  }

  // discover any hosts this member reports that we have not seen yet
  for (const address of serverDescription.allHosts) {
    if (!serverDescriptions.has(address)) {
      serverDescriptions.set(address, new ServerDescription(address));
    }
  }

  // a member that reports a different "me" address is removed
  if (serverDescription.me && serverDescription.address !== serverDescription.me) {
    serverDescriptions.delete(serverDescription.address);
  }

  return [topologyType, effectiveSetName];
}
|
||||
|
||||
/**
 * Determines whether the server map currently contains a primary.
 *
 * @param {Map} serverDescriptions the current address -> ServerDescription map
 * @return {String} TopologyType.ReplicaSetWithPrimary when an RSPrimary is
 *     present, otherwise TopologyType.ReplicaSetNoPrimary
 */
function checkHasPrimary(serverDescriptions) {
  for (const description of serverDescriptions.values()) {
    if (description.type === ServerType.RSPrimary) {
      return TopologyType.ReplicaSetWithPrimary;
    }
  }

  return TopologyType.ReplicaSetNoPrimary;
}
|
||||
|
||||
// Public surface of this module: only the TopologyDescription class is
// exported; the update helpers above are internal to the SDAM implementation.
module.exports = {
  TopologyDescription
};
|
||||
Reference in New Issue
Block a user