|
1 | 1 | /* eslint-disable @typescript-eslint/no-empty-function */ |
2 | | -import { expect } from 'chai'; |
3 | | -import * as sinon from 'sinon'; |
4 | | -import { Connection, HostAddress, MongoClient } from '../../mongodb'; |
5 | 2 | import { type TestConfiguration } from '../../tools/runner/config'; |
6 | 3 | import { runScriptAndGetProcessInfo } from './resource_tracking_script_builder'; |
7 | | -import { sleep } from '../../tools/utils'; |
8 | | -import { ConnectionPool } from '../../mongodb'; |
9 | | -import { getActiveResourcesInfo } from 'process'; |
10 | 4 |
|
11 | 5 | describe.only('MongoClient.close() Integration', () => { |
12 | 6 | // note: these tests are set-up in accordance of the resource ownership tree |
@@ -342,33 +336,49 @@ describe.only('MongoClient.close() Integration', () => { |
342 | 336 | describe('Node.js resource: checkOut Timer', () => { |
343 | 337 | describe('after new connection pool is created', () => { |
344 | 338 | it('the wait queue timer is cleaned up by client.close()', async function () { |
345 | | - // note: this test is not called in a separate process since it stubs internal class: ConnectionPool |
346 | 339 | const run = async function ({ MongoClient, uri, expect, sinon, sleep, getTimerCount }) { |
347 | | - const waitQueueTimeoutMS = 999; |
348 | | - const client = new MongoClient(uri, { minPoolSize: 1, waitQueueTimeoutMS }); |
| 340 | + const waitQueueTimeoutMS = 1515; |
| 341 | + |
| 342 | + // configure failPoint |
| 343 | + const utilClient = new MongoClient(uri); |
| 344 | + await utilClient.connect(); |
| 345 | + const failPoint = { |
| 346 | + configureFailPoint: 'failCommand', |
| 347 | + mode: { times: 1 }, |
| 348 | + data: { blockConnection: true, blockTimeMS: waitQueueTimeoutMS * 3, failCommands: ['insert'] } |
| 349 | + } |
| 350 | + await utilClient.db('admin').command(failPoint); |
| 351 | + |
349 | 352 | const timers = require('timers'); |
350 | 353 | const timeoutStartedSpy = sinon.spy(timers, 'setTimeout'); |
351 | | - let checkoutTimeoutStarted = false; |
352 | | - |
353 | | - // make waitQueue hang so check out timer isn't cleared and check that the timeout has started |
354 | | - sinon.stub(ConnectionPool.prototype, 'processWaitQueue').callsFake(async () => { |
355 | | - checkoutTimeoutStarted = timeoutStartedSpy.getCalls().filter(r => r.args.includes(waitQueueTimeoutMS)).flat().length > 0; |
356 | | - }); |
357 | 354 |
|
| 355 | + const client = new MongoClient(uri, { minPoolSize: 1, maxPoolSize: 1, waitQueueTimeoutMS }); |
358 | 356 | const insertPromise = client.db('db').collection('collection').insertOne({ x: 1 }).catch(e => e); |
| 357 | + client.db('db').collection('collection').insertOne({ x: 1 }).catch(e => e); |
359 | 358 |
|
360 | 359 | // don't allow entire checkout timer to elapse to ensure close is called mid-timeout |
361 | 360 | await sleep(waitQueueTimeoutMS / 2); |
| 361 | + const checkoutTimeoutStarted = timeoutStartedSpy.getCalls().filter(r => r.args.includes(waitQueueTimeoutMS)).flat().length > 0; |
362 | 362 | expect(checkoutTimeoutStarted).to.be.true; |
363 | 363 |
|
364 | 364 | await client.close(); |
365 | 365 | expect(getTimerCount()).to.equal(0); |
|  | 366 | + // un-configure failPoint |
| 367 | + await utilClient |
| 368 | + .db() |
| 369 | + .admin() |
| 370 | + .command({ |
| 371 | + configureFailPoint: 'failCommand', |
| 372 | + mode: 'off' |
| 373 | + }); |
| 374 | + |
| 375 | + await utilClient.close(); |
366 | 376 |
|
367 | 377 | const err = await insertPromise; |
368 | | - expect(err).to.not.be.instanceOf(Error); |
| 378 | + expect(err).to.be.instanceOf(Error); |
| 379 | + expect(err.message).to.contain('Timed out while checking out a connection from connection pool'); |
369 | 380 | }; |
370 | | - const getTimerCount = () => process.getActiveResourcesInfo().filter(r => r === 'Timeout').length; |
371 | | - await run({ MongoClient, uri: config.uri, sleep, sinon, expect, getTimerCount}); |
| 381 | + await runScriptAndGetProcessInfo('timer-check-out', config, run); |
372 | 382 | }); |
373 | 383 | }); |
374 | 384 | }); |
@@ -409,43 +419,22 @@ describe.only('MongoClient.close() Integration', () => { |
409 | 419 |
|
410 | 420 | describe('SrvPoller', () => { |
411 | 421 | describe('Node.js resource: Timer', () => { |
412 | | - // srv polling is not available for load-balanced mode |
| 422 | + // requires an srv environment that can transition to sharded |
413 | 423 | const metadata: MongoDBMetadataUI = { |
414 | 424 | requires: { |
415 | | - topology: ['single', 'replicaset', 'sharded'] |
|  | 425 | + predicate: () => process.env.ATLAS_SRV_REPL ? true : 'Skipped: this test requires an SRV environment' |
416 | 426 | } |
417 | 427 | }; |
| 428 | + |
418 | 429 | describe('after SRVPoller is created', () => { |
419 | 430 | it.only('timers are cleaned up by client.close()', metadata, async () => { |
420 | | - const run = async function ({ MongoClient, uri, expect, mongodb, sinon, getTimerCount }) { |
421 | | - const dns = require('dns'); |
422 | | - sinon.stub(dns.promises, 'resolveTxt').callsFake(async () => { |
423 | | - throw { code: 'ENODATA' }; |
424 | | - }); |
425 | | - sinon.stub(dns.promises, 'resolveSrv').callsFake(async () => { |
426 | | - return [ |
427 | | - { |
428 | | - name: 'domain.localhost', |
429 | | - port: 27017, |
430 | | - weight: 0, |
431 | | - priority: 0, |
432 | | - protocol: 'IPv6' |
433 | | - } |
434 | | - ]; |
435 | | - }); |
436 | | - |
437 | | - const client = new MongoClient('mongodb+srv://localhost'); |
438 | | - client.connect(); |
439 | | - |
440 | | - const topology = client.topology; |
441 | | - const prevDesc = topology; |
442 | | - log({ topology }); |
443 | | - const currDesc = prevDesc; |
444 | | - client.topology.emit( |
445 | | - 'topologyDescriptionChanged', |
446 | | - mongodb.TopologyDescriptionChangedEvent(client.topology.s.id, prevDesc, currDesc) |
447 | | - ); |
448 | | - |
| 431 | + const run = async function ({ MongoClient, uri, expect, sinon, getTimerCount }) { |
| 432 | + const client = new MongoClient(uri); |
| 433 | + await client.connect(); |
| 434 | + const description = client.topology.s.description; |
| 435 | + // simulate transition to sharded |
| 436 | + client.topology.emit('topologyDescriptionChanged', description, { ... description, type: 'Sharded'}); |
| 437 | + expect(client.topology.s.srvPoller?._timeout).to.exist; |
449 | 438 | await client.close(); |
450 | 439 | expect(getTimerCount()).to.equal(0); |
451 | 440 | }; |
|
0 commit comments