From f5fca7ee1292ae0b015d0e00fd455d7c04d6d8fa Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 15:24:10 +0200 Subject: [PATCH 01/41] test(NODE-7179): migrate unit/operations/aggregate.test.ts --- test/unit/operations/aggregate.test.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/unit/operations/aggregate.test.ts b/test/unit/operations/aggregate.test.ts index f02e83d7f51..92ef7c3065d 100644 --- a/test/unit/operations/aggregate.test.ts +++ b/test/unit/operations/aggregate.test.ts @@ -1,7 +1,8 @@ import { expect } from 'chai'; +import { WriteConcern } from '../../../src'; import { AggregateOperation } from '../../../src/operations/aggregate'; -import { MongoDBNamespace, WriteConcern } from '../../mongodb'; +import { MongoDBNamespace } from '../../../src/utils'; describe('AggregateOperation', function () { const ns = new MongoDBNamespace('test', 'coll'); @@ -64,7 +65,7 @@ describe('AggregateOperation', function () { }); context('when no writable stages', function () { - const operation = new AggregateOperation(ns, [{ $project: { name: 1 } }], { dbName: ns }); + const operation = new AggregateOperation(ns, [{ $project: { name: 1 } }], { dbName: ns.db }); it('sets hasWriteStage to false', function () { expect(operation.hasWriteStage).to.be.false; From 380ee7d7d182704842474df0dc4a3b0dd0e3c343 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 15:24:37 +0200 Subject: [PATCH 02/41] test(NODE-7179): migrate integration/auth/* --- test/integration/auth/auth.prose.test.ts | 5 ++++- test/integration/auth/mongodb_aws.test.ts | 14 +++++++------- test/integration/auth/mongodb_oidc.prose.test.ts | 4 ++-- .../auth/mongodb_oidc_azure.prose.05.test.ts | 2 +- .../auth/mongodb_oidc_gcp.prose.06.test.ts | 2 +- .../auth/mongodb_oidc_k8s.prose.07.test.ts | 2 +- 6 files changed, 16 insertions(+), 13 deletions(-) diff --git a/test/integration/auth/auth.prose.test.ts b/test/integration/auth/auth.prose.test.ts index 
d9408a42001..50e1cb7172b 100644 --- a/test/integration/auth/auth.prose.test.ts +++ b/test/integration/auth/auth.prose.test.ts @@ -1,7 +1,10 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; -import { Connection, LEGACY_HELLO_COMMAND, type MongoClient, ScramSHA256 } from '../../mongodb'; +import { type MongoClient } from '../../../src'; +import { ScramSHA256 } from '../../../src/cmap/auth/scram'; +import { Connection } from '../../../src/cmap/connection'; +import { LEGACY_HELLO_COMMAND } from '../../../src/constants'; import { type TestConfiguration } from '../../tools/runner/config'; function makeConnectionString(config, username, password) { diff --git a/test/integration/auth/mongodb_aws.test.ts b/test/integration/auth/mongodb_aws.test.ts index 24b8c678e81..3dff1d642a5 100644 --- a/test/integration/auth/mongodb_aws.test.ts +++ b/test/integration/auth/mongodb_aws.test.ts @@ -5,22 +5,22 @@ import * as http from 'http'; import { performance } from 'perf_hooks'; import * as sinon from 'sinon'; -import { refreshKMSCredentials } from '../../../src/client-side-encryption/providers'; import { - AWSSDKCredentialProvider, type CommandOptions, - Connection, type Document, MongoAWSError, type MongoClient, - MongoDBAWS, type MongoDBNamespace, type MongoDBResponseConstructor, MongoMissingCredentialsError, MongoMissingDependencyError, - MongoServerError, - setDifference -} from '../../mongodb'; + MongoServerError +} from '../../../src'; +import { refreshKMSCredentials } from '../../../src/client-side-encryption/providers'; +import { AWSSDKCredentialProvider } from '../../../src/cmap/auth/aws_temporary_credentials'; +import { MongoDBAWS } from '../../../src/cmap/auth/mongodb_aws'; +import { Connection } from '../../../src/cmap/connection'; +import { setDifference } from '../../../src/utils'; const isMongoDBAWSAuthEnvironment = (process.env.MONGODB_URI ?? 
'').includes('MONGODB-AWS'); diff --git a/test/integration/auth/mongodb_oidc.prose.test.ts b/test/integration/auth/mongodb_oidc.prose.test.ts index 3b821b68832..ea9321ac88a 100644 --- a/test/integration/auth/mongodb_oidc.prose.test.ts +++ b/test/integration/auth/mongodb_oidc.prose.test.ts @@ -8,10 +8,10 @@ import { type ClientSession, type Collection, MongoClient, - type MongoDBOIDC, type OIDCCallbackParams, type OIDCResponse -} from '../../mongodb'; +} from '../../../src'; +import { type MongoDBOIDC } from '../../../src/cmap/auth/mongodb_oidc'; const createCallback = (tokenFile = 'test_user1', expiresInSeconds?: number, extraFields?: any) => { return async (params: OIDCCallbackParams) => { diff --git a/test/integration/auth/mongodb_oidc_azure.prose.05.test.ts b/test/integration/auth/mongodb_oidc_azure.prose.05.test.ts index 847678537e4..0eca37979b4 100644 --- a/test/integration/auth/mongodb_oidc_azure.prose.05.test.ts +++ b/test/integration/auth/mongodb_oidc_azure.prose.05.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { type Collection, MongoClient, type MongoClientOptions } from '../../mongodb'; +import { type Collection, MongoClient, type MongoClientOptions } from '../../../src'; const DEFAULT_URI = 'mongodb://127.0.0.1:27017'; diff --git a/test/integration/auth/mongodb_oidc_gcp.prose.06.test.ts b/test/integration/auth/mongodb_oidc_gcp.prose.06.test.ts index 42b36e7f279..2385222061b 100644 --- a/test/integration/auth/mongodb_oidc_gcp.prose.06.test.ts +++ b/test/integration/auth/mongodb_oidc_gcp.prose.06.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { type Collection, MongoClient, type MongoClientOptions } from '../../mongodb'; +import { type Collection, MongoClient, type MongoClientOptions } from '../../../src'; const DEFAULT_URI = 'mongodb://127.0.0.1:27017'; diff --git a/test/integration/auth/mongodb_oidc_k8s.prose.07.test.ts b/test/integration/auth/mongodb_oidc_k8s.prose.07.test.ts index de1c5ca49e6..a6de8ad7ea4 100644 --- 
a/test/integration/auth/mongodb_oidc_k8s.prose.07.test.ts +++ b/test/integration/auth/mongodb_oidc_k8s.prose.07.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { type Collection, MongoClient } from '../../mongodb'; +import { type Collection, MongoClient } from '../../../src'; const DEFAULT_URI = 'mongodb://127.0.0.1:27017'; From 500a4bbac0c1486875b66d1cc23c6335a25f99f9 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 15:27:05 +0200 Subject: [PATCH 03/41] test(NODE-7179): migrate integration/causal-consistency/* --- .../causal-consistency/causal_consistency.prose.test.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/causal-consistency/causal_consistency.prose.test.js b/test/integration/causal-consistency/causal_consistency.prose.test.js index eb428404009..bc7bbeed6e0 100644 --- a/test/integration/causal-consistency/causal_consistency.prose.test.js +++ b/test/integration/causal-consistency/causal_consistency.prose.test.js @@ -1,6 +1,6 @@ 'use strict'; -const { LEGACY_HELLO_COMMAND } = require('../../mongodb'); +const { LEGACY_HELLO_COMMAND } = require('../../../src/constants'); const { setupDatabase } = require('../shared'); const { expect } = require('chai'); From 2cce19c1b4174d3e28db682712f5b7002db46b71 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 15:31:50 +0200 Subject: [PATCH 04/41] test(NODE-7179): migrate integration/client-side-encryption/* --- ...client_side_encryption.prose.06.corpus.test.ts | 2 +- ...lient_side_encryption.prose.10.kms_tls.test.ts | 2 +- ...ient_side_encryption.prose.12.deadlock.test.ts | 2 +- ..._encryption.prose.14.decryption_events.test.ts | 4 ++-- ...side_encryption.prose.17.on_demand_gcp.test.ts | 2 +- ...ryption.prose.18.azure_kms_mock_server.test.ts | 2 +- ...de_encryption.prose.19.on_demand_azure.test.ts | 2 +- ...encryption.prose.20.mongocryptd_client.test.ts | 2 +- ...rose.21.automatic_data_encryption_keys.test.ts | 2 +- 
...ion.prose.23.range_encryption_defaults.test.ts | 2 +- ...client_side_encryption.prose.25.lookup.test.ts | 2 +- ...ose.26.custom_aws_credential_providers.test.ts | 2 +- ..._side_encryption.prose.27.text_queries.test.ts | 2 +- .../client_side_encryption.prose.test.ts | 8 ++++---- .../client-side-encryption/driver.test.ts | 15 +++++++-------- 15 files changed, 25 insertions(+), 26 deletions(-) diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.06.corpus.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.06.corpus.test.ts index 85e4492a708..0d9657f6162 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.06.corpus.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.06.corpus.test.ts @@ -5,9 +5,9 @@ import { expect } from 'chai'; import * as fs from 'fs'; import * as path from 'path'; +import { type MongoClient, WriteConcern } from '../../../src'; import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; -import { type MongoClient, WriteConcern } from '../../mongodb'; import { getEncryptExtraOptions } from '../../tools/utils'; describe('Client Side Encryption Prose Corpus Test', function () { diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.10.kms_tls.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.10.kms_tls.test.ts index ffd8f621dfc..b2a678a2dc7 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.10.kms_tls.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.10.kms_tls.test.ts @@ -1,8 +1,8 @@ import { expect } from 'chai'; import { satisfies } from 'semver'; +import { ClientEncryption, type MongoClient } from '../../../src'; import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; -import { ClientEncryption, type 
MongoClient } from '../../mongodb'; const metadata: MongoDBMetadataUI = { requires: { diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.12.deadlock.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.12.deadlock.test.ts index 1c54b136c24..ed3d9fdadd8 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.12.deadlock.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.12.deadlock.test.ts @@ -3,8 +3,8 @@ import { expect } from 'chai'; import { readFileSync } from 'fs'; import * as path from 'path'; +import { type CommandStartedEvent, type MongoClient, type MongoClientOptions } from '../../../src'; import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; -import { type CommandStartedEvent, type MongoClient, type MongoClientOptions } from '../../mongodb'; import { type TestConfiguration } from '../../tools/runner/config'; import { getEncryptExtraOptions } from '../../tools/utils'; import { dropCollection } from '../shared'; diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.14.decryption_events.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.14.decryption_events.test.ts index 9412f78c9b8..4177efaeb08 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.14.decryption_events.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.14.decryption_events.test.ts @@ -1,6 +1,5 @@ import { expect } from 'chai'; -import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; import { Binary, BSON, @@ -8,7 +7,8 @@ import { type CommandSucceededEvent, type MongoClient, MongoNetworkError -} from '../../mongodb'; +} from '../../../src'; +import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; import { getEncryptExtraOptions } from '../../tools/utils'; const 
metadata: MongoDBMetadataUI = { diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.17.on_demand_gcp.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.17.on_demand_gcp.test.ts index 8fb7583a6e3..fe185a0b360 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.17.on_demand_gcp.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.17.on_demand_gcp.test.ts @@ -1,8 +1,8 @@ import { expect } from 'chai'; import { env } from 'process'; +import { Binary } from '../../../src'; import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; -import { Binary } from '../../mongodb'; const dataKeyOptions = { masterKey: { diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.18.azure_kms_mock_server.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.18.azure_kms_mock_server.test.ts index 7b4c92ad53a..604d9e7b3dd 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.18.azure_kms_mock_server.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.18.azure_kms_mock_server.test.ts @@ -1,11 +1,11 @@ import { expect } from 'chai'; +import { type Document } from '../../../src'; import { MongoCryptAzureKMSRequestError } from '../../../src/client-side-encryption/errors'; import { type AzureKMSRequestOptions, fetchAzureKMSToken } from '../../../src/client-side-encryption/providers/azure'; -import { type Document } from '../../mongodb'; const BASE_URL = new URL(`http://127.0.0.1:8080/metadata/identity/oauth2/token`); class KMSRequestOptions implements AzureKMSRequestOptions { diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.19.on_demand_azure.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.19.on_demand_azure.test.ts index b0b93d7655b..75db5b3a1b3 100644 --- 
a/test/integration/client-side-encryption/client_side_encryption.prose.19.on_demand_azure.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.19.on_demand_azure.test.ts @@ -1,9 +1,9 @@ import { expect } from 'chai'; import { env } from 'process'; +import { Binary } from '../../../src'; import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; import { MongoCryptAzureKMSRequestError } from '../../../src/client-side-encryption/errors'; -import { Binary } from '../../mongodb'; const dataKeyOptions = { masterKey: { diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.20.mongocryptd_client.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.20.mongocryptd_client.test.ts index 98bd88a9bdf..e199f568d63 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.20.mongocryptd_client.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.20.mongocryptd_client.test.ts @@ -2,8 +2,8 @@ import { expect } from 'chai'; import { once } from 'events'; import { createServer, type Server } from 'net'; +import { type MongoClient } from '../../../src'; import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; -import { type MongoClient } from '../../mongodb'; import { getEncryptExtraOptions } from '../../tools/utils'; describe('20. 
Bypass creating mongocryptd client when shared library is loaded', function () { diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.21.automatic_data_encryption_keys.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.21.automatic_data_encryption_keys.test.ts index 24afe0e7315..4ece8360ab7 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.21.automatic_data_encryption_keys.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.21.automatic_data_encryption_keys.test.ts @@ -1,5 +1,6 @@ import { expect } from 'chai'; +import { Collection, type Db, MongoServerError } from '../../../src'; import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; import { MongoCryptCreateEncryptedCollectionError } from '../../../src/client-side-encryption/errors'; import { @@ -7,7 +8,6 @@ import { kmsCredentialsPresent, missingKeys } from '../../csfle-kms-providers'; -import { Collection, type Db, MongoServerError } from '../../mongodb'; const metadata: MongoDBMetadataUI = { requires: { diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.23.range_encryption_defaults.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.23.range_encryption_defaults.test.ts index c6deedc2f8d..9d1439d54cc 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.23.range_encryption_defaults.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.23.range_encryption_defaults.test.ts @@ -1,8 +1,8 @@ import { expect } from 'chai'; +import { type Binary, Int32, Long } from '../../../src'; import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; -import { type Binary, Int32, Long } from '../../mongodb'; const metaData: MongoDBMetadataUI = { requires: { diff --git 
a/test/integration/client-side-encryption/client_side_encryption.prose.25.lookup.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.25.lookup.test.ts index 0c873ee1ccd..4f7883db55b 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.25.lookup.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.25.lookup.test.ts @@ -3,8 +3,8 @@ import * as path from 'node:path'; import { expect } from 'chai'; +import { BSON, type Document, type MongoClient } from '../../../src'; import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; -import { BSON, type Document, type MongoClient } from '../../mongodb'; import { type TestConfiguration } from '../../tools/runner/config'; import { getEncryptExtraOptions } from '../../tools/utils'; diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.26.custom_aws_credential_providers.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.26.custom_aws_credential_providers.test.ts index 073aa3cbc2c..c95f3733acc 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.26.custom_aws_credential_providers.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.26.custom_aws_credential_providers.test.ts @@ -1,7 +1,7 @@ import { expect } from 'chai'; +import { AWSSDKCredentialProvider, Binary, MongoClient } from '../../../src'; import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; -import { AWSSDKCredentialProvider, Binary, MongoClient } from '../../mongodb'; import { getEncryptExtraOptions } from '../../tools/utils'; const metadata: MongoDBMetadataUI = { diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.27.text_queries.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.27.text_queries.test.ts index 672c37fafd4..0aef976f8b6 100644 --- 
a/test/integration/client-side-encryption/client_side_encryption.prose.27.text_queries.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.27.text_queries.test.ts @@ -4,8 +4,8 @@ import { join } from 'node:path'; import { type Binary, type Document, EJSON } from 'bson'; import { expect } from 'chai'; +import { ClientEncryption, type MongoClient, MongoDBCollectionNamespace } from '../../../src'; import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; -import { ClientEncryption, type MongoClient, MongoDBCollectionNamespace } from '../../mongodb'; const metadata: MongoDBMetadataUI = { requires: { diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.test.ts index b76687eccc7..280016da601 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.test.ts @@ -4,16 +4,16 @@ import * as fs from 'fs/promises'; import * as path from 'path'; import { satisfies } from 'semver'; -import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; -import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; import { - LEGACY_HELLO_COMMAND, MongoClient, MongoCryptError, MongoRuntimeError, MongoServerError, MongoServerSelectionError -} from '../../mongodb'; +} from '../../../src'; +import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; +import { LEGACY_HELLO_COMMAND } from '../../../src/constants'; +import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; import { AlpineTestConfiguration } from '../../tools/runner/config'; import { getEncryptExtraOptions } from '../../tools/utils'; import { APMEventCollector, dropCollection } from '../shared'; diff --git a/test/integration/client-side-encryption/driver.test.ts 
b/test/integration/client-side-encryption/driver.test.ts index 02fe82d336e..8e0500733ca 100644 --- a/test/integration/client-side-encryption/driver.test.ts +++ b/test/integration/client-side-encryption/driver.test.ts @@ -6,22 +6,21 @@ import * as sinon from 'sinon'; import { setTimeout } from 'timers/promises'; import * as tls from 'tls'; -import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; -import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; import { BSON, type Collection, type CommandStartedEvent, Connection, - CSOTTimeoutContext, type MongoClient, MongoCryptCreateDataKeyError, MongoCryptCreateEncryptedCollectionError, - MongoOperationTimeoutError, - resolveTimeoutOptions, - StateMachine, - TimeoutContext -} from '../../mongodb'; + MongoOperationTimeoutError +} from '../../../src'; +import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; +import { StateMachine } from '../../../src/client-side-encryption/state_machine'; +import { CSOTTimeoutContext, TimeoutContext } from '../../../src/timeout'; +import { resolveTimeoutOptions } from '../../../src/utils'; +import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; import { clearFailPoint, configureFailPoint, From 9f06e494c255ba7d622c785cc21b4a293a5620e3 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 15:34:00 +0200 Subject: [PATCH 05/41] test(NODE-7179): migrate integration/client-side-operations-timeout/* --- .../client_side_operations_timeout.prose.test.ts | 7 +++---- .../client_side_operations_timeout.unit.test.ts | 4 ++-- .../client-side-operations-timeout/node_csot.test.ts | 8 ++++---- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index eca944afb3a..52d77487b2b 100644 --- 
a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -17,10 +17,9 @@ import { MongoClient, MongoOperationTimeoutError, MongoServerSelectionError, - now, - ObjectId, - squashError -} from '../../mongodb'; + ObjectId +} from '../../../src'; +import { now, squashError } from '../../../src/utils'; import { clearFailPoint, configureFailPoint, diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 0eef0318bac..9a62c26a823 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -10,7 +10,6 @@ import { setTimeout } from 'timers'; import { TLSSocket } from 'tls'; import { promisify } from 'util'; -import { StateMachine } from '../../../src/client-side-encryption/state_machine'; import { Connection, ConnectionPool, @@ -21,7 +20,8 @@ import { Timeout, TimeoutContext, Topology -} from '../../mongodb'; +} from '../../../src'; +import { StateMachine } from '../../../src/client-side-encryption/state_machine'; import { measureDuration, sleep } from '../../tools/utils'; import { createTimerSandbox } from '../../unit/timer_sandbox'; diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index 7aad267083d..6840a76a496 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -17,20 +17,20 @@ import { type CommandFailedEvent, type CommandStartedEvent, type CommandSucceededEvent, - Connection, CursorTimeoutMode, type Db, type FindCursor, GridFSBucket, - 
LEGACY_HELLO_COMMAND, type MongoClient, MongoInvalidArgumentError, MongoOperationTimeoutError, MongoServerError, ObjectId, - promiseWithResolvers, TopologyType -} from '../../mongodb'; +} from '../../../src'; +import { Connection } from '../../../src/cmap/connection'; +import { LEGACY_HELLO_COMMAND } from '../../../src/constants'; +import { promiseWithResolvers } from '../../../src/utils'; import { type FailCommandFailPoint, type FailPoint, waitUntilPoolsFilled } from '../../tools/utils'; const metadata = { requires: { mongodb: '>=4.4' } }; From 150d37c54ab3c8657aee272b86f32015c46611d4 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 15:34:58 +0200 Subject: [PATCH 06/41] test(NODE-7179): migrate integration/command-logging-and-monitoring/* --- .../command_logging_and_monitoring.prose.test.ts | 3 ++- .../command-logging-and-monitoring/command_monitoring.test.ts | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration/command-logging-and-monitoring/command_logging_and_monitoring.prose.test.ts b/test/integration/command-logging-and-monitoring/command_logging_and_monitoring.prose.test.ts index 7beac02a275..c55f10c1167 100644 --- a/test/integration/command-logging-and-monitoring/command_logging_and_monitoring.prose.test.ts +++ b/test/integration/command-logging-and-monitoring/command_logging_and_monitoring.prose.test.ts @@ -1,6 +1,7 @@ import { expect } from 'chai'; -import { DEFAULT_MAX_DOCUMENT_LENGTH, type Document } from '../../mongodb'; +import { type Document } from '../../../src'; +import { DEFAULT_MAX_DOCUMENT_LENGTH } from '../../../src/mongo_logger'; describe('Command Logging and Monitoring Prose Tests', function () { const ELLIPSES_LENGTH = 3; diff --git a/test/integration/command-logging-and-monitoring/command_monitoring.test.ts b/test/integration/command-logging-and-monitoring/command_monitoring.test.ts index b089ca7d65f..01c596c07ff 100644 --- 
a/test/integration/command-logging-and-monitoring/command_monitoring.test.ts +++ b/test/integration/command-logging-and-monitoring/command_monitoring.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { type MongoClient, ObjectId, ReadPreference } from '../../mongodb'; +import { type MongoClient, ObjectId, ReadPreference } from '../../../src'; import { filterForCommands, setupDatabase } from '../shared'; describe('Command Monitoring', function () { From 0ff42b7109386fc46c9a22116b839d28a12cc751 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 15:48:51 +0200 Subject: [PATCH 07/41] test(NODE-7179): migrate integration/connections-survive-step-down/* --- .../connections_survive_step_down.prose.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/connections-survive-step-down/connections_survive_step_down.prose.test.ts b/test/integration/connections-survive-step-down/connections_survive_step_down.prose.test.ts index 3ba24d95870..2ab148e810a 100644 --- a/test/integration/connections-survive-step-down/connections_survive_step_down.prose.test.ts +++ b/test/integration/connections-survive-step-down/connections_survive_step_down.prose.test.ts @@ -5,10 +5,10 @@ import { type ConnectionPoolClearedEvent, type FindCursor, type MongoClient, - MONGODB_ERROR_CODES, MongoServerError, ReadPreference -} from '../../mongodb'; +} from '../../../src'; +import { MONGODB_ERROR_CODES } from '../../../src/error'; import { type FailCommandFailPoint } from '../../tools/utils'; describe('Connections Survive Primary Step Down - prose', function () { From c05b94689fc297453991e736ba2aef781af9ed4f Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 15:49:47 +0200 Subject: [PATCH 08/41] test(NODE-7179): migrate integration/index_management.test.ts --- test/integration/index_management.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/index_management.test.ts 
b/test/integration/index_management.test.ts index 05574a26be5..014b52d650b 100644 --- a/test/integration/index_management.test.ts +++ b/test/integration/index_management.test.ts @@ -7,7 +7,7 @@ import { type Db, type MongoClient, MongoServerError -} from '../mongodb'; +} from '../../src'; import { type FailCommandFailPoint } from '../tools/utils'; import { assert as test, filterForCommands, setupDatabase } from './shared'; From 9c361327a925632b6896b32e7a8b8daa510711f1 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 15:50:24 +0200 Subject: [PATCH 09/41] test(NODE-7179): migrate integration/index-management/* --- .../index-management/search-index-management.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/index-management/search-index-management.test.ts b/test/integration/index-management/search-index-management.test.ts index 39d17f22818..f8e98fae91b 100644 --- a/test/integration/index-management/search-index-management.test.ts +++ b/test/integration/index-management/search-index-management.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { type Collection, type CommandStartedEvent, type MongoClient } from '../../mongodb'; +import { type Collection, type CommandStartedEvent, type MongoClient } from '../../../src'; describe('Search Index Management Integration Tests', function () { describe('read concern and write concern ', function () { From bac259dce656e5f3785274808f866a06f7699c48 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 15:51:13 +0200 Subject: [PATCH 10/41] test(NODE-7179): migrate integration/initial-dns-seedlist-discovery/* --- .../initial-dns-seedlist-discovery/dns_seedlist.test.ts | 2 +- .../initial_dns_seedlist_discovery.prose.test.ts | 2 +- .../initial_dns_seedlist_discovery.spec.test.ts | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration/initial-dns-seedlist-discovery/dns_seedlist.test.ts 
b/test/integration/initial-dns-seedlist-discovery/dns_seedlist.test.ts index 9ccc87fab30..0618c376c01 100644 --- a/test/integration/initial-dns-seedlist-discovery/dns_seedlist.test.ts +++ b/test/integration/initial-dns-seedlist-discovery/dns_seedlist.test.ts @@ -2,7 +2,7 @@ import { expect } from 'chai'; import * as dns from 'dns'; import * as sinon from 'sinon'; -import { MongoClient } from '../../mongodb'; +import { MongoClient } from '../../../src'; const metadata: MongoDBMetadataUI = { requires: { topology: '!single' } }; diff --git a/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.prose.test.ts b/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.prose.test.ts index 3f931c019c6..5516760417f 100644 --- a/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.prose.test.ts +++ b/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.prose.test.ts @@ -2,7 +2,7 @@ import { expect } from 'chai'; import * as dns from 'dns'; import * as sinon from 'sinon'; -import { ConnectionPool, MongoAPIError, Server, ServerDescription, Topology } from '../../mongodb'; +import { ConnectionPool, MongoAPIError, Server, ServerDescription, Topology } from '../../../src'; import { topologyWithPlaceholderClient } from '../../tools/utils'; describe('Initial DNS Seedlist Discovery (Prose Tests)', () => { diff --git a/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.spec.test.ts b/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.spec.test.ts index 141c5d6fe58..a0136c331ec 100644 --- a/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.spec.test.ts +++ b/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.spec.test.ts @@ -4,7 +4,7 @@ import * as fs from 'fs'; import * as path from 'path'; import { promisify } from 'util'; -import { HostAddress, MongoClient } from 
'../../mongodb'; +import { HostAddress, MongoClient } from '../../../src'; function makeTest(test, topology) { let client; From 97884bd94080ace447dda8bae9e1451eab4f6f1e Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 15:53:40 +0200 Subject: [PATCH 11/41] test(NODE-7179): migrate integration/max-staleness/* --- test/integration/max-staleness/max_staleness.test.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/max-staleness/max_staleness.test.js b/test/integration/max-staleness/max_staleness.test.js index 2b22291f0a1..2bb712cd702 100644 --- a/test/integration/max-staleness/max_staleness.test.js +++ b/test/integration/max-staleness/max_staleness.test.js @@ -2,8 +2,8 @@ const { Long } = require('bson'); const { expect } = require('chai'); const mock = require('../../tools/mongodb-mock/index'); -const { ReadPreference } = require('../../mongodb'); -const { isHello } = require('../../mongodb'); +const { ReadPreference } = require('../../../src'); +const { isHello } = require('../../../src/utils'); const test = {}; // TODO (NODE-3799): convert these to run against a real server From cbfd71073cc6b8930bc1ae553d638fa7179742b9 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 16:00:21 +0200 Subject: [PATCH 12/41] test(NODE-7179): migrate integration/mongodb-handshake/* --- .../mongodb-handshake.prose.test.ts | 38 ++++++++----------- .../mongodb-handshake.test.ts | 13 ++----- 2 files changed, 20 insertions(+), 31 deletions(-) diff --git a/test/integration/mongodb-handshake/mongodb-handshake.prose.test.ts b/test/integration/mongodb-handshake/mongodb-handshake.prose.test.ts index f60c439fe21..55eaee1ec45 100644 --- a/test/integration/mongodb-handshake/mongodb-handshake.prose.test.ts +++ b/test/integration/mongodb-handshake/mongodb-handshake.prose.test.ts @@ -1,16 +1,10 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; -import { type ClientMetadata, type DriverInfo } from 
'../../../mongodb'; -import { MongoClient as RawMongoClient } from '../../../src'; -import { - Connection, - getFAASEnv, - Int32, - isDriverInfoEqual, - LEGACY_HELLO_COMMAND, - type MongoClient -} from '../../mongodb'; +import { type ClientMetadata, type DriverInfo, Int32, MongoClient } from '../../../src'; +import { Connection } from '../../../src/cmap/connection'; +import { getFAASEnv, isDriverInfoEqual } from '../../../src/cmap/handshake/client_metadata'; +import { LEGACY_HELLO_COMMAND } from '../../../src/constants'; import { sleep } from '../../tools/utils'; type EnvironmentVariables = Array<[string, string]>; @@ -368,7 +362,7 @@ describe('Client Metadata Update Prose Tests', function () { let updatedClientMetadata: ClientMetadata; // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests - let client: RawMongoClient; + let client: MongoClient; // | Case | Name | Version | Platform | // | ---- | --------- | ------- | ------------------ | @@ -403,7 +397,7 @@ describe('Client Metadata Update Prose Tests', function () { // 4. Save intercepted `client` document as `updatedClientMetadata`. // 5. Wait 5ms for the connection to become idle. beforeEach(async function () { - client = new RawMongoClient(this.configuration.url(), { + client = new MongoClient(this.configuration.url(), { maxIdleTimeMS: 1, serverApi: this.configuration.serverApi }); @@ -485,7 +479,7 @@ describe('Client Metadata Update Prose Tests', function () { let initialClientMetadata: ClientMetadata; let updatedClientMetadata: ClientMetadata; // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests - let client: RawMongoClient; + let client: MongoClient; afterEach(async function () { await client.close(); @@ -495,7 +489,7 @@ describe('Client Metadata Update Prose Tests', function () { // 1. 
Create a `MongoClient` instance with: // - `maxIdleTimeMS` set to `1ms` - client = new RawMongoClient(this.configuration.url(), { + client = new MongoClient(this.configuration.url(), { maxIdleTimeMS: 1, serverApi: this.configuration.serverApi }); @@ -578,7 +572,7 @@ describe('Client Metadata Update Prose Tests', function () { let initialClientMetadata: ClientMetadata; let updatedClientMetadata: ClientMetadata; // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests - let client: RawMongoClient; + let client: MongoClient; // 1. Create a `MongoClient` instance with the following: // - `maxIdleTimeMS` set to `1ms` @@ -600,7 +594,7 @@ describe('Client Metadata Update Prose Tests', function () { // | name | library | // | version | 1.2 | // | platform | Library Platform | - client = new RawMongoClient(this.configuration.url(), { + client = new MongoClient(this.configuration.url(), { maxIdleTimeMS: 1, serverApi: this.configuration.serverApi, driverInfo: { name: 'library', version: '1.2', platform: 'Library Platform' } @@ -656,7 +650,7 @@ describe('Client Metadata Update Prose Tests', function () { let clientMetadata: ClientMetadata; let updatedClientMetadata: ClientMetadata; // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests - let client: RawMongoClient; + let client: MongoClient; afterEach(async function () { await client.close(); @@ -672,7 +666,7 @@ describe('Client Metadata Update Prose Tests', function () { // | version | 1.2 | // | platform | Library Platform | - client = new RawMongoClient(this.configuration.url(), { + client = new MongoClient(this.configuration.url(), { maxIdleTimeMS: 1, driverInfo: { name: 'library', @@ -748,7 +742,7 @@ describe('Client Metadata Update Prose Tests', function () { let initialClientMetadata: ClientMetadata; let updatedClientMetadata: ClientMetadata; // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests - let 
client: RawMongoClient; + let client: MongoClient; afterEach(async function () { await client.close(); @@ -803,7 +797,7 @@ describe('Client Metadata Update Prose Tests', function () { it('does not appended duplicate metadata', async function () { // 1. Create a `MongoClient` instance with: // - `maxIdleTimeMS` set to `1ms` - client = new RawMongoClient(this.configuration.url(), { + client = new MongoClient(this.configuration.url(), { maxIdleTimeMS: 1, serverApi: this.configuration.serverApi }); @@ -852,7 +846,7 @@ describe('Client Metadata Update Prose Tests', function () { let initialClientMetadata: ClientMetadata; let updatedClientMetadata: ClientMetadata; // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests - let client: RawMongoClient; + let client: MongoClient; afterEach(async function () { await client.close(); @@ -908,7 +902,7 @@ describe('Client Metadata Update Prose Tests', function () { // 1. Create a `MongoClient` instance with: // - `maxIdleTimeMS` set to `1ms` // - `driverInfo` set to the `DriverInfoOptions` from the selected test case from the initial metadata section. 
- client = new RawMongoClient(this.configuration.url(), { + client = new MongoClient(this.configuration.url(), { maxIdleTimeMS: 1, serverApi: this.configuration.serverApi, driverInfo: metadata.initial diff --git a/test/integration/mongodb-handshake/mongodb-handshake.test.ts b/test/integration/mongodb-handshake/mongodb-handshake.test.ts index da5f0a9896f..92b538a5f66 100644 --- a/test/integration/mongodb-handshake/mongodb-handshake.test.ts +++ b/test/integration/mongodb-handshake/mongodb-handshake.test.ts @@ -3,15 +3,10 @@ import type Sinon from 'sinon'; // eslint-disable-next-line no-duplicate-imports import * as sinon from 'sinon'; -import { - Connection, - LEGACY_HELLO_COMMAND, - MongoServerError, - MongoServerSelectionError, - OpMsgRequest, - OpQueryRequest, - ServerApiVersion -} from '../../mongodb'; +import { MongoServerError, MongoServerSelectionError, ServerApiVersion } from '../../../src'; +import { OpMsgRequest, OpQueryRequest } from '../../../src/cmap/commands'; +import { Connection } from '../../../src/cmap/connection'; +import { LEGACY_HELLO_COMMAND } from '../../../src/constants'; describe('MongoDB Handshake', () => { let client; From 6f6101e52fb914ab8866ed15c6a93d6d8be5fc9f Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 16:02:15 +0200 Subject: [PATCH 13/41] test(NODE-7179): migrate integration/retryable-writes/* --- .../non-server-retryable_writes.test.ts | 10 +++------- .../retryable_writes.spec.prose.test.ts | 6 +++--- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/test/integration/retryable-writes/non-server-retryable_writes.test.ts b/test/integration/retryable-writes/non-server-retryable_writes.test.ts index 326036f120b..3cfd4677738 100644 --- a/test/integration/retryable-writes/non-server-retryable_writes.test.ts +++ b/test/integration/retryable-writes/non-server-retryable_writes.test.ts @@ -1,13 +1,9 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; -import { - type Collection, - type 
MongoClient, - MongoWriteConcernError, - PoolClearedError, - Server -} from '../../mongodb'; +import { type Collection, type MongoClient, MongoWriteConcernError } from '../../../src'; +import { PoolClearedError } from '../../../src/cmap/errors'; +import { Server } from '../../../src/sdam/server'; describe('Non Server Retryable Writes', function () { let client: MongoClient; diff --git a/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts index 22f475978d0..58fe83ca97a 100644 --- a/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts +++ b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts @@ -9,9 +9,9 @@ import { type MongoClient, MongoError, MongoServerError, - MongoWriteConcernError, - Server -} from '../../mongodb'; + MongoWriteConcernError +} from '../../../src'; +import { Server } from '../../../src/sdam/server'; import { sleep } from '../../tools/utils'; describe('Retryable Writes Spec Prose', () => { From 48d73946105b500ff0c02d822c405aecbab9ee8c Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 16:02:55 +0200 Subject: [PATCH 14/41] test(NODE-7179): migrate integration/run-command/* --- test/integration/run-command/run_command.test.ts | 2 +- test/integration/run-command/run_cursor_command.test.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/run-command/run_command.test.ts b/test/integration/run-command/run_command.test.ts index e71d3887bfe..eaa744806b6 100644 --- a/test/integration/run-command/run_command.test.ts +++ b/test/integration/run-command/run_command.test.ts @@ -7,7 +7,7 @@ import { ReadConcern, ReadPreference, WriteConcern -} from '../../mongodb'; +} from '../../../src'; describe('RunCommand API', () => { let client: MongoClient; diff --git a/test/integration/run-command/run_cursor_command.test.ts b/test/integration/run-command/run_cursor_command.test.ts index 
8b88c313f73..4b4fd80b5be 100644 --- a/test/integration/run-command/run_cursor_command.test.ts +++ b/test/integration/run-command/run_cursor_command.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { type Db, type MongoClient } from '../../mongodb'; +import { type Db, type MongoClient } from '../../../src'; describe('runCursorCommand API', () => { let client: MongoClient; From 54263976282432b970f1a1347bf7586684abfad3 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 16:05:16 +0200 Subject: [PATCH 15/41] test(NODE-7179): migrate integration/server-discovery-and-monitoring/* --- .../server_description.test.ts | 2 +- .../server_discover_and_monitoring.test.ts | 8 ++------ .../server_discovery_and_monitoring.prose.test.ts | 4 ++-- .../topology_description.test.ts | 8 ++------ 4 files changed, 7 insertions(+), 15 deletions(-) diff --git a/test/integration/server-discovery-and-monitoring/server_description.test.ts b/test/integration/server-discovery-and-monitoring/server_description.test.ts index 0bc9112c4b0..de14fb21bbb 100644 --- a/test/integration/server-discovery-and-monitoring/server_description.test.ts +++ b/test/integration/server-discovery-and-monitoring/server_description.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { MongoClient } from '../../mongodb'; +import { MongoClient } from '../../../src'; import { configureMongocryptdSpawnHooks } from '../../tools/utils'; describe('class ServerDescription', function () { diff --git a/test/integration/server-discovery-and-monitoring/server_discover_and_monitoring.test.ts b/test/integration/server-discovery-and-monitoring/server_discover_and_monitoring.test.ts index 282d081b45c..cceca051f5e 100644 --- a/test/integration/server-discovery-and-monitoring/server_discover_and_monitoring.test.ts +++ b/test/integration/server-discovery-and-monitoring/server_discover_and_monitoring.test.ts @@ -3,12 +3,8 @@ import { setTimeout } from 'node:timers/promises'; import { expect } from 
'chai'; import * as sinon from 'sinon'; -import { - Connection, - type MongoClient, - promiseWithResolvers, - type ServerHeartbeatSucceededEvent -} from '../../mongodb'; +import { Connection, type MongoClient, type ServerHeartbeatSucceededEvent } from '../../../src'; +import { promiseWithResolvers } from '../../../src/utils'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; diff --git a/test/integration/server-discovery-and-monitoring/server_discovery_and_monitoring.prose.test.ts b/test/integration/server-discovery-and-monitoring/server_discovery_and_monitoring.prose.test.ts index ad8ade8c7dd..a412a2036e5 100644 --- a/test/integration/server-discovery-and-monitoring/server_discovery_and_monitoring.prose.test.ts +++ b/test/integration/server-discovery-and-monitoring/server_discovery_and_monitoring.prose.test.ts @@ -1,13 +1,13 @@ import { expect } from 'chai'; import { once } from 'events'; +import { type MongoClient } from '../../../src'; import { CONNECTION_POOL_CLEARED, CONNECTION_POOL_READY, - type MongoClient, SERVER_HEARTBEAT_FAILED, SERVER_HEARTBEAT_SUCCEEDED -} from '../../mongodb'; +} from '../../../src/constants'; describe('Server Discovery and Monitoring Prose Tests', function () { context('Monitors sleep at least minHeartbeatFrequencyMS between checks', function () { diff --git a/test/integration/server-discovery-and-monitoring/topology_description.test.ts b/test/integration/server-discovery-and-monitoring/topology_description.test.ts index 291e13dd70e..47f5fcb862d 100644 --- a/test/integration/server-discovery-and-monitoring/topology_description.test.ts +++ b/test/integration/server-discovery-and-monitoring/topology_description.test.ts @@ -1,11 +1,7 @@ import { expect } from 'chai'; -import { - getTopology, - type MongoClient, - type MongoClientOptions, - TopologyType -} from '../../mongodb'; +import { type MongoClient, type MongoClientOptions, TopologyType } from '../../../src'; 
+import { getTopology } from '../../../src/utils'; describe('TopologyDescription (integration tests)', function () { let client: MongoClient; From 4d4a2ca639950bd3e4651c9f59a7c89d1398b126 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 16:06:13 +0200 Subject: [PATCH 16/41] test(NODE-7179): migrate test/csfle-kms-providers.ts --- test/csfle-kms-providers.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/csfle-kms-providers.ts b/test/csfle-kms-providers.ts index dc1d3502bbc..9825c6f2d6a 100644 --- a/test/csfle-kms-providers.ts +++ b/test/csfle-kms-providers.ts @@ -1,4 +1,4 @@ -import { type KMSProviders } from './mongodb'; +import { type KMSProviders } from './../src'; const csfleKMSProviders = { aws: { From 86cc350a29a48de6aff7f06c3c7fc3eefe158857 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 16:07:46 +0200 Subject: [PATCH 17/41] test(NODE-7179): migrate test/manual/* --- test/manual/atlas_connectivity.test.ts | 3 ++- test/manual/kerberos.test.ts | 2 +- test/manual/ldap.test.ts | 2 +- test/manual/search-index-management.prose.test.ts | 8 +------- test/manual/socks5.test.ts | 3 ++- test/manual/tls_support.test.ts | 8 ++------ 6 files changed, 9 insertions(+), 17 deletions(-) diff --git a/test/manual/atlas_connectivity.test.ts b/test/manual/atlas_connectivity.test.ts index 61b7c0d0b99..288da73b268 100644 --- a/test/manual/atlas_connectivity.test.ts +++ b/test/manual/atlas_connectivity.test.ts @@ -1,4 +1,5 @@ -import { LEGACY_HELLO_COMMAND, MongoClient } from '../mongodb'; +import { MongoClient } from '../../src'; +import { LEGACY_HELLO_COMMAND } from '../../src/constants'; /** * ATLAS_CONNECTIVITY env variable is JSON diff --git a/test/manual/kerberos.test.ts b/test/manual/kerberos.test.ts index a78adc72060..92111b25853 100644 --- a/test/manual/kerberos.test.ts +++ b/test/manual/kerberos.test.ts @@ -2,7 +2,7 @@ import * as chai from 'chai'; import { promises as dns } from 'dns'; import * as sinon from 
'sinon'; -import { MongoClient } from '../mongodb'; +import { MongoClient } from '../../src'; const expect = chai.expect; diff --git a/test/manual/ldap.test.ts b/test/manual/ldap.test.ts index cec70308d57..c727e0ffe54 100644 --- a/test/manual/ldap.test.ts +++ b/test/manual/ldap.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { MongoClient } from '../mongodb'; +import { MongoClient } from '../../src'; describe('LDAP', function () { const { SASL_USER, SASL_PASS, SASL_HOST } = process.env; diff --git a/test/manual/search-index-management.prose.test.ts b/test/manual/search-index-management.prose.test.ts index 282158f54b4..8cdee809515 100644 --- a/test/manual/search-index-management.prose.test.ts +++ b/test/manual/search-index-management.prose.test.ts @@ -4,13 +4,7 @@ import { Readable } from 'stream'; import { clearTimeout, setTimeout as setTimeoutCb } from 'timers'; import { setInterval } from 'timers/promises'; -import { - type Collection, - type Document, - type MongoClient, - ObjectId, - ReadConcern -} from '../mongodb'; +import { type Collection, type Document, type MongoClient, ObjectId, ReadConcern } from '../../src'; class TimeoutController extends AbortController { timeoutId: NodeJS.Timeout; diff --git a/test/manual/socks5.test.ts b/test/manual/socks5.test.ts index 184514c5dd7..16e363152bf 100644 --- a/test/manual/socks5.test.ts +++ b/test/manual/socks5.test.ts @@ -1,7 +1,8 @@ import { expect } from 'chai'; import ConnectionString from 'mongodb-connection-string-url'; -import { LEGACY_HELLO_COMMAND, MongoClient, MongoParseError } from '../mongodb'; +import { MongoClient, MongoParseError } from '../../src'; +import { LEGACY_HELLO_COMMAND } from '../../src/constants'; /** * The SOCKS5_CONFIG environment variable is either a JSON 4-tuple diff --git a/test/manual/tls_support.test.ts b/test/manual/tls_support.test.ts index 8f67e8d9a31..4db67b1203c 100644 --- a/test/manual/tls_support.test.ts +++ b/test/manual/tls_support.test.ts @@ -6,12 +6,8 @@ 
import { promises as fs } from 'fs'; import ConnectionString from 'mongodb-connection-string-url'; import * as sinon from 'sinon'; -import { - LEGACY_HELLO_COMMAND, - MongoClient, - type MongoClientOptions, - MongoServerSelectionError -} from '../mongodb'; +import { MongoClient, type MongoClientOptions, MongoServerSelectionError } from '../../src'; +import { LEGACY_HELLO_COMMAND } from '../../src/constants'; const REQUIRED_ENV = ['MONGODB_URI', 'TLS_KEY_FILE', 'TLS_CA_FILE', 'TLS_CRL_FILE']; From 60b60cbbd42a76527f2948a8957ab4179df4978f Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 16:08:50 +0200 Subject: [PATCH 18/41] test(NODE-7179): migrate test/spec/* --- test/spec/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/spec/index.ts b/test/spec/index.ts index 221d6671893..fc43b748715 100644 --- a/test/spec/index.ts +++ b/test/spec/index.ts @@ -1,7 +1,7 @@ import * as fs from 'fs'; import * as path from 'path'; -import { EJSON } from '../mongodb'; +import { EJSON } from '../../src/bson'; function hasDuplicates(testArray) { const testNames = testArray.map(test => test.description); From bea7dd1b6d9fbcf702a117928b93b39efbb78a3f Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 16:51:01 +0200 Subject: [PATCH 19/41] test(NODE-7179): migrate test/types/* --- test/types/admin.test-d.ts | 2 +- test/types/basic_schema.test-d.ts | 2 +- test/types/bson.test-d.ts | 2 +- test/types/change_stream.test-d.ts | 2 +- test/types/client-side-encryption.test-d.ts | 2 +- test/types/client_bulk_write.test-d.ts | 2 +- .../bulk/bulk-operation-base.test-d.ts | 5 +-- test/types/community/client.test-d.ts | 2 +- .../community/collection/aggregate.test-d.ts | 2 +- .../community/collection/bulkWrite.test-d.ts | 2 +- .../community/collection/count.test-d.ts | 2 +- .../community/collection/distinct.test-d.ts | 2 +- .../community/collection/updateX.test-d.ts | 2 +- test/types/community/cursor.test-d.ts | 2 +- 
.../community/db/createCollection.test-d.ts | 2 +- test/types/community/transaction.test-d.ts | 2 +- test/types/connection.test-d.ts | 4 +- test/types/connection_pool_events.test-d.ts | 2 +- test/types/encryption.test-d.ts | 2 +- test/types/enum.test-d.ts | 2 +- test/types/example_schemas.ts | 2 +- test/types/helper_types.test-d.ts | 2 +- test/types/indexed_schema.test-d.ts | 2 +- test/types/indexes_test-d.ts | 8 +++- test/types/list_collections.test-d.ts | 2 +- test/types/mongodb.test-d.ts | 39 ++++++++++++------- test/types/schema_helpers.test-d.ts | 4 +- test/types/sessions.test-d.ts | 2 +- test/types/sort.test-d.ts | 2 +- test/types/type_errors.test-d.ts | 2 +- test/types/union_schema.test-d.ts | 2 +- test/types/write_concern.test-d.ts | 2 +- 32 files changed, 65 insertions(+), 49 deletions(-) diff --git a/test/types/admin.test-d.ts b/test/types/admin.test-d.ts index 180dbca9e69..b109391ce90 100644 --- a/test/types/admin.test-d.ts +++ b/test/types/admin.test-d.ts @@ -1,6 +1,6 @@ import { expectType } from 'tsd'; -import { type Document, MongoClient } from '../mongodb'; +import { type Document, MongoClient } from '../../src'; const client = new MongoClient(''); const admin = client.db().admin(); diff --git a/test/types/basic_schema.test-d.ts b/test/types/basic_schema.test-d.ts index d066257cbab..e45f5fc23d7 100644 --- a/test/types/basic_schema.test-d.ts +++ b/test/types/basic_schema.test-d.ts @@ -1,6 +1,6 @@ import { expectAssignable, expectNotAssignable, expectNotType, expectType } from 'tsd'; -import { Collection, Db, type Document, type InferIdType, MongoClient, ObjectId } from '../mongodb'; +import { Collection, Db, type Document, type InferIdType, MongoClient, ObjectId } from '../../src'; const db = new Db(new MongoClient(''), ''); diff --git a/test/types/bson.test-d.ts b/test/types/bson.test-d.ts index b011e9369c2..1ae3b9817b2 100644 --- a/test/types/bson.test-d.ts +++ b/test/types/bson.test-d.ts @@ -1,6 +1,6 @@ import { expectType } from 'tsd'; -import 
type { BSONSerializeOptions, Document } from '../mongodb'; +import type { BSONSerializeOptions, Document } from '../../src'; const options: BSONSerializeOptions = {}; diff --git a/test/types/change_stream.test-d.ts b/test/types/change_stream.test-d.ts index ea689a61ea5..b76eec9f772 100644 --- a/test/types/change_stream.test-d.ts +++ b/test/types/change_stream.test-d.ts @@ -27,7 +27,7 @@ import type { ServerSessionId, Timestamp, UpdateDescription -} from '../mongodb'; +} from '../../src'; declare const changeStreamOptions: ChangeStreamOptions; type ChangeStreamOperationType = diff --git a/test/types/client-side-encryption.test-d.ts b/test/types/client-side-encryption.test-d.ts index 3ed3d184bf4..0845276b43f 100644 --- a/test/types/client-side-encryption.test-d.ts +++ b/test/types/client-side-encryption.test-d.ts @@ -10,7 +10,7 @@ import type { KMSProviders, RangeOptions } from '../..'; -import { Binary, type ClientEncryptionDataKeyProvider } from '../mongodb'; +import { Binary, type ClientEncryptionDataKeyProvider } from '../../src'; type RequiredCreateEncryptedCollectionSettings = Parameters< ClientEncryption['createEncryptedCollection'] diff --git a/test/types/client_bulk_write.test-d.ts b/test/types/client_bulk_write.test-d.ts index 834b68b19cd..7079a9804d7 100644 --- a/test/types/client_bulk_write.test-d.ts +++ b/test/types/client_bulk_write.test-d.ts @@ -15,7 +15,7 @@ import { type UpdateFilter, type UUID, type WithoutId -} from '../mongodb'; +} from '../../src'; declare const client: MongoClient; type Book = { title: string; released: Date }; diff --git a/test/types/community/bulk/bulk-operation-base.test-d.ts b/test/types/community/bulk/bulk-operation-base.test-d.ts index 3f1fdfc9282..a2f97d811d3 100644 --- a/test/types/community/bulk/bulk-operation-base.test-d.ts +++ b/test/types/community/bulk/bulk-operation-base.test-d.ts @@ -1,16 +1,15 @@ import { expectType } from 'tsd'; import { - Batch, type BatchType, - BulkOperationBase, type BulkWriteOptions, type 
BulkWriteResult, type DeleteStatement, type Document, MongoClient, type UpdateStatement -} from '../../../mongodb'; +} from '../../../../src'; +import { Batch, BulkOperationBase } from '../../../../src/bulk/common'; const client = new MongoClient(''); const db = client.db('test'); diff --git a/test/types/community/client.test-d.ts b/test/types/community/client.test-d.ts index 0f290239aa8..c6ae23b91fd 100644 --- a/test/types/community/client.test-d.ts +++ b/test/types/community/client.test-d.ts @@ -9,7 +9,7 @@ import { ReadPreference, type ReadPreferenceMode, type W -} from '../../mongodb'; +} from '../../../src'; // TODO(NODE-3348): Improve the tests to expectType assertions diff --git a/test/types/community/collection/aggregate.test-d.ts b/test/types/community/collection/aggregate.test-d.ts index 840443f74d9..9625f4005ac 100644 --- a/test/types/community/collection/aggregate.test-d.ts +++ b/test/types/community/collection/aggregate.test-d.ts @@ -1,6 +1,6 @@ import { expectNotType, expectType } from 'tsd'; -import { type AggregationCursor, type Document, MongoClient } from '../../../mongodb'; +import { type AggregationCursor, type Document, MongoClient } from '../../../../src'; // collection.aggregate tests const client = new MongoClient(''); diff --git a/test/types/community/collection/bulkWrite.test-d.ts b/test/types/community/collection/bulkWrite.test-d.ts index 49c36539d63..1813ec2627d 100644 --- a/test/types/community/collection/bulkWrite.test-d.ts +++ b/test/types/community/collection/bulkWrite.test-d.ts @@ -6,7 +6,7 @@ import { type Document, MongoClient, ObjectId -} from '../../../mongodb'; +} from '../../../../src'; // TODO(NODE-3347): Improve these tests to use more expect assertions diff --git a/test/types/community/collection/count.test-d.ts b/test/types/community/collection/count.test-d.ts index 70d183e2bc7..2c1a3cecc8b 100644 --- a/test/types/community/collection/count.test-d.ts +++ b/test/types/community/collection/count.test-d.ts @@ -1,6 +1,6 @@ 
import { expectDeprecated, expectType } from 'tsd'; -import { MongoClient } from '../../../mongodb'; +import { MongoClient } from '../../../../src'; // test collection.countDocuments const client = new MongoClient(''); diff --git a/test/types/community/collection/distinct.test-d.ts b/test/types/community/collection/distinct.test-d.ts index 4292be71c6b..ebe0e13697a 100644 --- a/test/types/community/collection/distinct.test-d.ts +++ b/test/types/community/collection/distinct.test-d.ts @@ -1,6 +1,6 @@ import { expectType } from 'tsd'; -import { MongoClient, type ObjectId } from '../../../mongodb'; +import { MongoClient, type ObjectId } from '../../../../src'; // test collection.distinct functions interface Collection { diff --git a/test/types/community/collection/updateX.test-d.ts b/test/types/community/collection/updateX.test-d.ts index 1a6926facdc..b2aaa03e445 100644 --- a/test/types/community/collection/updateX.test-d.ts +++ b/test/types/community/collection/updateX.test-d.ts @@ -19,7 +19,7 @@ import { type StrictUpdateFilter, type Timestamp, type UpdateFilter -} from '../../../mongodb'; +} from '../../../../src'; // MatchKeysAndValues - for basic mapping keys to their values, restricts that key types must be the same but optional, and permit dot array notation expectAssignable>({ diff --git a/test/types/community/cursor.test-d.ts b/test/types/community/cursor.test-d.ts index 6be7a85776c..3fb171b8da6 100644 --- a/test/types/community/cursor.test-d.ts +++ b/test/types/community/cursor.test-d.ts @@ -1,7 +1,7 @@ import type { Readable } from 'stream'; import { expectNotType, expectType } from 'tsd'; -import { Db, type Document, type FindCursor, MongoClient } from '../../mongodb'; +import { Db, type Document, type FindCursor, MongoClient } from '../../../src'; // TODO(NODE-3346): Improve these tests to use expect assertions more diff --git a/test/types/community/db/createCollection.test-d.ts b/test/types/community/db/createCollection.test-d.ts index 
9194cbf6012..5f167ae23bb 100644 --- a/test/types/community/db/createCollection.test-d.ts +++ b/test/types/community/db/createCollection.test-d.ts @@ -5,7 +5,7 @@ import { type CreateCollectionOptions, MongoClient, type ObjectId -} from '../../../mongodb'; +} from '../../../../src'; const client = new MongoClient(''); const db = client.db('test'); diff --git a/test/types/community/transaction.test-d.ts b/test/types/community/transaction.test-d.ts index 949d5a17f6f..01d4cf98a82 100644 --- a/test/types/community/transaction.test-d.ts +++ b/test/types/community/transaction.test-d.ts @@ -1,6 +1,6 @@ import { expectType } from 'tsd'; -import { type ClientSession, type InsertOneResult, MongoClient, ReadConcern } from '../../mongodb'; +import { type ClientSession, type InsertOneResult, MongoClient, ReadConcern } from '../../../src'; // TODO(NODE-3345): Improve these tests to use expect assertions more diff --git a/test/types/connection.test-d.ts b/test/types/connection.test-d.ts index 25953c5e16f..0fef49ff680 100644 --- a/test/types/connection.test-d.ts +++ b/test/types/connection.test-d.ts @@ -1,6 +1,8 @@ import { expectError, expectType } from 'tsd'; -import { type Connection, type Document, MongoDBResponse, ns } from '../mongodb'; +import { type Connection, type Document } from '../../src'; +import { MongoDBResponse } from '../../src/cmap/wire_protocol/responses'; +import { ns } from '../../src/utils'; declare const connection: Connection; diff --git a/test/types/connection_pool_events.test-d.ts b/test/types/connection_pool_events.test-d.ts index 348d5c52d30..5e9acc7d0c6 100644 --- a/test/types/connection_pool_events.test-d.ts +++ b/test/types/connection_pool_events.test-d.ts @@ -1,7 +1,7 @@ import { once } from 'events'; import { expectType } from 'tsd'; -import { type ConnectionPoolCreatedEvent, MongoClient } from '../mongodb'; +import { type ConnectionPoolCreatedEvent, MongoClient } from '../../src'; const client: MongoClient = new MongoClient(''); const p = 
once(client, 'connectionPoolCreated'); diff --git a/test/types/encryption.test-d.ts b/test/types/encryption.test-d.ts index a22400e6190..f32006b763f 100644 --- a/test/types/encryption.test-d.ts +++ b/test/types/encryption.test-d.ts @@ -1,6 +1,6 @@ import { expectAssignable } from 'tsd'; -import type { AutoEncryptionOptions } from '../mongodb'; +import type { AutoEncryptionOptions } from '../../src'; // Empty credentials support on each provider expectAssignable({ diff --git a/test/types/enum.test-d.ts b/test/types/enum.test-d.ts index 537ec202da2..ed07529bfc6 100644 --- a/test/types/enum.test-d.ts +++ b/test/types/enum.test-d.ts @@ -18,7 +18,7 @@ import { ServerApiVersion, ServerType, TopologyType -} from '../mongodb'; +} from '../../src'; const num: number = Math.random(); diff --git a/test/types/example_schemas.ts b/test/types/example_schemas.ts index 7ce9fa9f038..08368dc1501 100644 --- a/test/types/example_schemas.ts +++ b/test/types/example_schemas.ts @@ -1,4 +1,4 @@ -import type { Double, Int32 } from '../mongodb'; +import type { Double, Int32 } from '../../src'; export type MediaType = 'movie' | 'tv' | 'web series'; diff --git a/test/types/helper_types.test-d.ts b/test/types/helper_types.test-d.ts index 26dfb5ca5f7..489120fd219 100644 --- a/test/types/helper_types.test-d.ts +++ b/test/types/helper_types.test-d.ts @@ -16,7 +16,7 @@ import { type NumericType, type OneOrMore, type OnlyFieldsOfType -} from '../mongodb'; +} from '../../src'; expectType>(true); expectNotType>(true); diff --git a/test/types/indexed_schema.test-d.ts b/test/types/indexed_schema.test-d.ts index 97e05edde30..ee47e3f6da4 100644 --- a/test/types/indexed_schema.test-d.ts +++ b/test/types/indexed_schema.test-d.ts @@ -1,6 +1,6 @@ import { expectError, expectNotType, expectType } from 'tsd'; -import { Collection, Db, MongoClient, ObjectId } from '../mongodb'; +import { Collection, Db, MongoClient, ObjectId } from '../../src'; const db = new Db(new MongoClient(''), ''); diff --git 
a/test/types/indexes_test-d.ts b/test/types/indexes_test-d.ts index 8d68a34f30f..57cb9277feb 100644 --- a/test/types/indexes_test-d.ts +++ b/test/types/indexes_test-d.ts @@ -1,7 +1,11 @@ import { expectAssignable, expectType } from 'tsd'; -import { type IndexInformationOptions, MongoClient } from '../../src'; -import { type IndexDescriptionCompact, type IndexDescriptionInfo } from '../mongodb'; +import { + type IndexDescriptionCompact, + type IndexDescriptionInfo, + type IndexInformationOptions, + MongoClient +} from '../../src'; const client = new MongoClient(''); const db = client.db('test'); diff --git a/test/types/list_collections.test-d.ts b/test/types/list_collections.test-d.ts index 06d2b471d9b..42438302f88 100644 --- a/test/types/list_collections.test-d.ts +++ b/test/types/list_collections.test-d.ts @@ -1,6 +1,6 @@ import { expectNotType, expectType } from 'tsd'; -import { type CollectionInfo, type ListCollectionsCursor, MongoClient } from '../mongodb'; +import { type CollectionInfo, type ListCollectionsCursor, MongoClient } from '../../src'; const db = new MongoClient('').db(); diff --git a/test/types/mongodb.test-d.ts b/test/types/mongodb.test-d.ts index 4037a18159d..f48b03e169c 100644 --- a/test/types/mongodb.test-d.ts +++ b/test/types/mongodb.test-d.ts @@ -1,22 +1,33 @@ import type { Document } from 'bson'; import { expectDeprecated, expectError, expectNotDeprecated, expectType } from 'tsd'; -import type { WithId, WriteConcern, WriteConcernSettings } from '../../src'; -import * as MongoDBDriver from '../../src'; import { + type AbstractCursor, + type AbstractCursorOptions, + type AggregateOptions, type AggregationCursor, type ChangeStreamDocument, Collection, + type CommandOperationOptions, + type CountOptions, + type EstimatedDocumentCountOptions, FindCursor, - MongoClient -} from '../mongodb'; + MongoClient, + type MongoClientOptions, + ObjectId, + type RunCommandCursor, + type TransactionOptions, + type WithId, + type WriteConcern, + type 
WriteConcernSettings +} from '../../src'; // We wish to keep these APIs but continue to ensure they are marked as deprecated. expectDeprecated(Collection.prototype.count); expectDeprecated(FindCursor.prototype.count); -expectNotDeprecated(MongoDBDriver.ObjectId); +expectNotDeprecated(ObjectId); -declare const options: MongoDBDriver.MongoClientOptions; +declare const options: MongoClientOptions; expectDeprecated(options.w); expectDeprecated(options.journal); expectDeprecated(options.wtimeoutMS); @@ -28,29 +39,29 @@ expectNotDeprecated(options.connectTimeoutMS); expectType(options.writeConcern); -declare const estimatedDocumentCountOptions: MongoDBDriver.EstimatedDocumentCountOptions; +declare const estimatedDocumentCountOptions: EstimatedDocumentCountOptions; // TODO(NODE-6491): expectDeprecated(estimatedDocumentCountOptions.maxTimeMS); -declare const countOptions: MongoDBDriver.CountOptions; +declare const countOptions: CountOptions; // TODO(NODE-6491): expectDeprecated(countOptions.maxTimeMS); -declare const commandOptions: MongoDBDriver.CommandOperationOptions; +declare const commandOptions: CommandOperationOptions; // TODO(NODE-6491): expectDeprecated(commandOptions.maxTimeMS); -declare const aggregateOptions: MongoDBDriver.AggregateOptions; +declare const aggregateOptions: AggregateOptions; // TODO(NODE-6491): expectDeprecated(aggregateOptions.maxTimeMS); -declare const runCommandCursor: MongoDBDriver.RunCommandCursor; +declare const runCommandCursor: RunCommandCursor; // TODO(NODE-6491): expectDeprecated(runCommandCursor.setMaxTimeMS); // TODO(NODE-6491): expectDeprecated(runCommandCursor.maxTimeMS); -declare const cursorOptions: MongoDBDriver.AbstractCursorOptions; +declare const cursorOptions: AbstractCursorOptions; // TODO(NODE-6491): expectDeprecated(cursorOptions.maxTimeMS); -declare const abstractCursor: MongoDBDriver.AbstractCursor; +declare const abstractCursor: AbstractCursor; // TODO(NODE-6491): expectDeprecated(abstractCursor.maxTimeMS); -declare 
const txnOptions: MongoDBDriver.TransactionOptions; +declare const txnOptions: TransactionOptions; // TODO(NODE-6491): expectDeprecated(txnOptions.maxCommitTimeMS); interface TSchema extends Document { diff --git a/test/types/schema_helpers.test-d.ts b/test/types/schema_helpers.test-d.ts index 6c09ee07900..8c7ca305661 100644 --- a/test/types/schema_helpers.test-d.ts +++ b/test/types/schema_helpers.test-d.ts @@ -1,15 +1,15 @@ import { type Document, ObjectId } from 'bson'; import { expectAssignable, expectError, expectNotType, expectType } from 'tsd'; -import type { Collection } from '../../src'; import type { + Collection, EnhancedOmit, InferIdType, OptionalId, OptionalUnlessRequiredId, WithId, WithoutId -} from '../mongodb'; +} from '../../src'; /** ---------------------------------------------------------------------- * InferIdType diff --git a/test/types/sessions.test-d.ts b/test/types/sessions.test-d.ts index 0945f26093b..ea39fb6b714 100644 --- a/test/types/sessions.test-d.ts +++ b/test/types/sessions.test-d.ts @@ -9,7 +9,7 @@ import { ReadConcern, ReadConcernLevel, type Timestamp -} from '../mongodb'; +} from '../../src'; // test mapped cursor types const client = new MongoClient(''); diff --git a/test/types/sort.test-d.ts b/test/types/sort.test-d.ts index 63f7bc3efe7..87f91283349 100644 --- a/test/types/sort.test-d.ts +++ b/test/types/sort.test-d.ts @@ -6,7 +6,7 @@ import { type MongoClient, ObjectId, type Sort -} from '../mongodb'; +} from '../../src'; const sortFieldName: Sort = 'a'; const sortFieldNameObject: Sort = { a: 1, b: -1 }; diff --git a/test/types/type_errors.test-d.ts b/test/types/type_errors.test-d.ts index f29fa8540ee..5335785ad4e 100644 --- a/test/types/type_errors.test-d.ts +++ b/test/types/type_errors.test-d.ts @@ -1,4 +1,4 @@ -import { MongoClient } from '../mongodb'; +import { MongoClient } from '../../src'; /** * This test file should contain examples of known compilation errors diff --git a/test/types/union_schema.test-d.ts 
b/test/types/union_schema.test-d.ts index efdbf19e6fb..83b4d7c8c9f 100644 --- a/test/types/union_schema.test-d.ts +++ b/test/types/union_schema.test-d.ts @@ -1,6 +1,6 @@ import { expectAssignable, expectError, expectNotAssignable, expectNotType, expectType } from 'tsd'; -import { type Collection, type Document, ObjectId, type WithId } from '../mongodb'; +import { type Collection, type Document, ObjectId, type WithId } from '../../src'; type InsertOneFirstParam = Parameters['insertOne']>[0]; diff --git a/test/types/write_concern.test-d.ts b/test/types/write_concern.test-d.ts index 2b10824a1c6..8564eee5c87 100644 --- a/test/types/write_concern.test-d.ts +++ b/test/types/write_concern.test-d.ts @@ -6,7 +6,7 @@ import type { ListCollectionsOptions, ListIndexesOptions, WriteConcern -} from '../mongodb'; +} from '../../src'; expectNotAssignable({ writeConcern: { w: 0 } }); expectNotAssignable({ writeConcern: { w: 0 } }); From c20700b59b03556f82b002ee63f628656c0c23ab Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 18:42:59 +0200 Subject: [PATCH 20/41] test(NODE-7179): migrate test/tools/* --- test/tools/cmap_spec_runner.ts | 12 ++++++------ test/tools/common.js | 12 ++++++------ test/tools/mongodb-mock/index.js | 2 +- test/tools/runner/config.ts | 4 ++-- test/tools/runner/filters/api_version_filter.ts | 2 +- .../tools/runner/filters/client_encryption_filter.ts | 2 +- test/tools/runner/filters/crypt_shared_filter.ts | 2 +- test/tools/runner/filters/filter.ts | 2 +- test/tools/runner/filters/mongodb_topology_filter.ts | 2 +- test/tools/runner/filters/mongodb_version_filter.ts | 2 +- test/tools/runner/hooks/configuration.ts | 2 +- test/tools/runner/hooks/leak_checker.ts | 3 ++- test/tools/runner/hooks/legacy_crud_shims.ts | 2 +- test/tools/spec-runner/context.js | 2 +- .../unified-spec-runner/entity_event_registry.ts | 2 +- test/tools/unified-spec-runner/operations.ts | 4 ++-- test/tools/uri_spec_runner.ts | 2 +- test/tools/utils.ts | 8 ++++---- 18 files 
changed, 34 insertions(+), 33 deletions(-) diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index 28230359587..6ded70923bc 100644 --- a/test/tools/cmap_spec_runner.ts +++ b/test/tools/cmap_spec_runner.ts @@ -4,16 +4,16 @@ import { clearTimeout, setTimeout } from 'timers'; import { inspect } from 'util'; import { - CMAP_EVENTS, type Connection, - ConnectionPool, type ConnectionPoolOptions, type HostAddress, type MongoClient, - type Server, - shuffle, - TimeoutContext -} from '../mongodb'; + type Server +} from '../../src'; +import { ConnectionPool } from '../../src/cmap/connection_pool'; +import { CMAP_EVENTS } from '../../src/constants'; +import { TimeoutContext } from '../../src/timeout'; +import { shuffle } from '../../src/utils'; import { isAnyRequirementSatisfied } from './unified-spec-runner/unified-utils'; import { type FailCommandFailPoint, sleep } from './utils'; diff --git a/test/tools/common.js b/test/tools/common.js index 39f9e87e827..a2b15dd9a00 100644 --- a/test/tools/common.js +++ b/test/tools/common.js @@ -1,13 +1,13 @@ 'use strict'; const mock = require('./mongodb-mock/index'); -const BSON = require('../mongodb'); -const { LEGACY_HELLO_COMMAND } = require('../mongodb'); -const { isHello } = require('../mongodb'); +const { ObjectId, Timestamp, Long, Binary } = require('../../src'); +const { LEGACY_HELLO_COMMAND } = require('../../src/constants'); +const { isHello } = require('../../src/utils'); class ReplSetFixture { constructor() { - this.electionIds = [new BSON.ObjectId(), new BSON.ObjectId()]; + this.electionIds = [new ObjectId(), new ObjectId()]; } uri(dbName) { @@ -121,8 +121,8 @@ class ReplSetFixture { */ function genClusterTime(time) { return { - clusterTime: new BSON.Timestamp(BSON.Long.fromNumber(time, true)), - signature: { hash: new BSON.Binary(Buffer.from('test', 'utf8')), keyId: new BSON.Long(1) } + clusterTime: new Timestamp(Long.fromNumber(time, true)), + signature: { hash: new Binary(Buffer.from('test', 
'utf8')), keyId: new Long(1) } }; } diff --git a/test/tools/mongodb-mock/index.js b/test/tools/mongodb-mock/index.js index c05823e5177..7a6f6240504 100644 --- a/test/tools/mongodb-mock/index.js +++ b/test/tools/mongodb-mock/index.js @@ -1,6 +1,6 @@ const fs = require('fs'); const { MockServer } = require('./src/server.js'); -const { LEGACY_HELLO_COMMAND } = require('../../mongodb'); +const { LEGACY_HELLO_COMMAND } = require('../../../src/constants'); let mockServers = []; diff --git a/test/tools/runner/config.ts b/test/tools/runner/config.ts index 69314408a3f..01dd893e39e 100644 --- a/test/tools/runner/config.ts +++ b/test/tools/runner/config.ts @@ -11,7 +11,6 @@ import * as url from 'url'; import { type AuthMechanism, Double, - HostAddress, Long, MongoClient, type MongoClientOptions, @@ -19,7 +18,8 @@ import { type ServerApi, TopologyType, type WriteConcernSettings -} from '../../mongodb'; +} from '../../../src'; +import { HostAddress } from '../../../src/utils'; import { getEnvironmentalOptions } from '../utils'; import { type Filter } from './filters/filter'; import { flakyTests } from './flaky'; diff --git a/test/tools/runner/filters/api_version_filter.ts b/test/tools/runner/filters/api_version_filter.ts index 0c4b376ef17..4b06c958de3 100755 --- a/test/tools/runner/filters/api_version_filter.ts +++ b/test/tools/runner/filters/api_version_filter.ts @@ -1,4 +1,4 @@ -import { type MongoClient } from '../../../mongodb'; +import { type MongoClient } from '../../../../src'; import { Filter } from './filter'; /** diff --git a/test/tools/runner/filters/client_encryption_filter.ts b/test/tools/runner/filters/client_encryption_filter.ts index b9f87350413..8f51f3ab2f1 100644 --- a/test/tools/runner/filters/client_encryption_filter.ts +++ b/test/tools/runner/filters/client_encryption_filter.ts @@ -3,8 +3,8 @@ import { dirname, resolve } from 'path'; import * as process from 'process'; import { satisfies } from 'semver'; +import { type MongoClient } from '../../../../src'; 
import { kmsCredentialsPresent } from '../../../csfle-kms-providers'; -import { type MongoClient } from '../../../mongodb'; import { Filter } from './filter'; /** diff --git a/test/tools/runner/filters/crypt_shared_filter.ts b/test/tools/runner/filters/crypt_shared_filter.ts index bf824357081..2879d55e09f 100644 --- a/test/tools/runner/filters/crypt_shared_filter.ts +++ b/test/tools/runner/filters/crypt_shared_filter.ts @@ -1,4 +1,4 @@ -import { type AutoEncrypter, MongoClient } from '../../../mongodb'; +import { type AutoEncrypter, MongoClient } from '../../../../src'; import { getEncryptExtraOptions } from '../../utils'; import { Filter } from './filter'; diff --git a/test/tools/runner/filters/filter.ts b/test/tools/runner/filters/filter.ts index 6251cf44c8c..bc7f6512bbe 100644 --- a/test/tools/runner/filters/filter.ts +++ b/test/tools/runner/filters/filter.ts @@ -1,4 +1,4 @@ -import { type MongoClient } from '../../../mongodb'; +import { type MongoClient } from '../../../../src'; export abstract class Filter { async initializeFilter(_client: MongoClient, _context: Record): Promise { diff --git a/test/tools/runner/filters/mongodb_topology_filter.ts b/test/tools/runner/filters/mongodb_topology_filter.ts index 429b028b8b3..98f53155551 100755 --- a/test/tools/runner/filters/mongodb_topology_filter.ts +++ b/test/tools/runner/filters/mongodb_topology_filter.ts @@ -1,4 +1,4 @@ -import { type MongoClient, TopologyType } from '../../../mongodb'; +import { type MongoClient, TopologyType } from '../../../../src'; import { Filter } from './filter'; /** diff --git a/test/tools/runner/filters/mongodb_version_filter.ts b/test/tools/runner/filters/mongodb_version_filter.ts index 8d1eb6307ff..93fc6ad86a4 100755 --- a/test/tools/runner/filters/mongodb_version_filter.ts +++ b/test/tools/runner/filters/mongodb_version_filter.ts @@ -1,6 +1,6 @@ import * as semver from 'semver'; -import { type MongoClient } from '../../../mongodb'; +import { type MongoClient } from '../../../../src'; 
import { Filter } from './filter'; /** diff --git a/test/tools/runner/hooks/configuration.ts b/test/tools/runner/hooks/configuration.ts index e5c6547902e..9fdc5c6c91c 100644 --- a/test/tools/runner/hooks/configuration.ts +++ b/test/tools/runner/hooks/configuration.ts @@ -5,7 +5,7 @@ require('source-map-support').install({ hookRequire: true }); -import { MongoClient } from '../../../mongodb'; +import { MongoClient } from '../../../../src'; import { AlpineTestConfiguration, AstrolabeTestConfiguration, TestConfiguration } from '../config'; import { getEnvironmentalOptions } from '../../utils'; import * as mock from '../../mongodb-mock/index'; diff --git a/test/tools/runner/hooks/leak_checker.ts b/test/tools/runner/hooks/leak_checker.ts index 2b35801969c..b6171d7a204 100644 --- a/test/tools/runner/hooks/leak_checker.ts +++ b/test/tools/runner/hooks/leak_checker.ts @@ -3,7 +3,8 @@ import { expect } from 'chai'; import * as chalk from 'chalk'; import * as net from 'net'; -import { MongoClient, ServerSessionPool } from '../../../mongodb'; +import { MongoClient } from '../../../../src'; +import { ServerSessionPool } from '../../../../src/sessions'; class LeakChecker { static originalAcquire: typeof ServerSessionPool.prototype.acquire; diff --git a/test/tools/runner/hooks/legacy_crud_shims.ts b/test/tools/runner/hooks/legacy_crud_shims.ts index 28da4349658..357b0a6d503 100644 --- a/test/tools/runner/hooks/legacy_crud_shims.ts +++ b/test/tools/runner/hooks/legacy_crud_shims.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { Collection } from '../../../mongodb'; +import { Collection } from '../../../../src'; // Setup legacy shims for tests that use removed or changed APIs const legacyUsageCounts = { diff --git a/test/tools/spec-runner/context.js b/test/tools/spec-runner/context.js index 2c82fefa552..4c8383e001c 100644 --- a/test/tools/spec-runner/context.js +++ b/test/tools/spec-runner/context.js @@ -2,7 +2,7 @@ const { expect } = require('chai'); const { setTimeout 
} = require('timers'); const { resolveConnectionString } = require('./utils'); -const { ns } = require('../../mongodb'); +const { ns } = require('../../../src/utils'); const { extractAuthFromConnectionString } = require('../utils'); class Thread { diff --git a/test/tools/unified-spec-runner/entity_event_registry.ts b/test/tools/unified-spec-runner/entity_event_registry.ts index d62e1069886..9b392c0be31 100644 --- a/test/tools/unified-spec-runner/entity_event_registry.ts +++ b/test/tools/unified-spec-runner/entity_event_registry.ts @@ -15,7 +15,7 @@ import { CONNECTION_POOL_CREATED, CONNECTION_POOL_READY, CONNECTION_READY -} from '../../mongodb'; +} from '../../../src/constants'; import { type EntitiesMap, type UnifiedMongoClient } from './entities'; import { type ClientEntity } from './schema'; diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 2e87b099ae9..7605cc4112f 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -16,13 +16,13 @@ import { MongoError, ReadConcern, ReadPreference, - SERVER_DESCRIPTION_CHANGED, ServerType, type TopologyDescription, type TopologyType, type TransactionOptions, WriteConcern -} from '../../mongodb'; +} from '../../../src'; +import { SERVER_DESCRIPTION_CHANGED } from '../../../src/constants'; import { sleep } from '../../tools/utils'; import { type TestConfiguration } from '../runner/config'; import { EntitiesMap } from './entities'; diff --git a/test/tools/uri_spec_runner.ts b/test/tools/uri_spec_runner.ts index bb6f44459a1..78d4f45aef9 100644 --- a/test/tools/uri_spec_runner.ts +++ b/test/tools/uri_spec_runner.ts @@ -6,7 +6,7 @@ import { MongoInvalidArgumentError, MongoParseError, MongoRuntimeError -} from '../mongodb'; +} from '../../src'; type HostObject = { type: 'ipv4' | 'ip_literal' | 'hostname' | 'unix'; diff --git a/test/tools/utils.ts b/test/tools/utils.ts index f90d8c50cc8..7720b3167d6 100644 --- 
a/test/tools/utils.ts +++ b/test/tools/utils.ts @@ -17,11 +17,11 @@ import { type HostAddress, MongoClient, type MongoClientOptions, - now, - OP_MSG, - Topology, type TopologyOptions -} from '../mongodb'; +} from '../../src'; +import { OP_MSG } from '../../src/cmap/wire_protocol/constants'; +import { Topology } from '../../src/sdam/topology'; +import { now } from '../../src/utils'; import { type TestConfiguration } from './runner/config'; export function ensureCalledWith(stub: any, args: any[]) { From 3ed3ae5c70c1f585ae41257f3cd3351411bc9e24 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 18:47:45 +0200 Subject: [PATCH 21/41] test(NODE-7179): migrate test/integration/collection-management/* --- .../collection-management/collection_db_management.test.ts | 2 +- test/integration/collection-management/view.test.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/collection-management/collection_db_management.test.ts b/test/integration/collection-management/collection_db_management.test.ts index 327c7f4113a..a53fc90a381 100644 --- a/test/integration/collection-management/collection_db_management.test.ts +++ b/test/integration/collection-management/collection_db_management.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { Collection, type Db, type MongoClient, ObjectId } from '../../mongodb'; +import { Collection, type Db, type MongoClient, ObjectId } from '../../../src'; describe('Collection Management and Db Management', function () { let client: MongoClient; diff --git a/test/integration/collection-management/view.test.ts b/test/integration/collection-management/view.test.ts index 88b30d03f3e..ec645f38fa3 100644 --- a/test/integration/collection-management/view.test.ts +++ b/test/integration/collection-management/view.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { type CollectionInfo, type Db, type MongoClient } from '../../mongodb'; +import { type CollectionInfo, type Db, type 
MongoClient } from '../../../src'; describe('Views', function () { let client: MongoClient; From 863206b29d6cba2f0e7eade2db271c35d14686a3 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 18:58:41 +0200 Subject: [PATCH 22/41] test(NODE-7179): migrate test/integration/crud/* --- .../crud/abstract_operation.test.ts | 241 ++++++++++-------- .../crud/client_bulk_write.test.ts | 8 +- test/integration/crud/crud.prose.test.ts | 4 +- test/integration/crud/explain.test.ts | 2 +- test/integration/crud/find_and_modify.test.ts | 2 +- test/integration/crud/maxTimeMS.test.ts | 2 +- 6 files changed, 150 insertions(+), 109 deletions(-) diff --git a/test/integration/crud/abstract_operation.test.ts b/test/integration/crud/abstract_operation.test.ts index 74d3c88d4e7..b28692d7b7b 100644 --- a/test/integration/crud/abstract_operation.test.ts +++ b/test/integration/crud/abstract_operation.test.ts @@ -1,251 +1,292 @@ import { expect } from 'chai'; -import { Long } from '../../mongodb'; -import * as mongodb from '../../mongodb'; +import { + type AbstractOperation, + type Admin, + type Collection, + type Db, + Long, + type MongoClient, + type Server +} from '../../../src'; +import { AggregateOperation } from '../../../src/operations/aggregate'; +import { CountOperation } from '../../../src/operations/count'; +import { CreateCollectionOperation } from '../../../src/operations/create_collection'; +import { + DeleteManyOperation, + DeleteOneOperation, + DeleteOperation +} from '../../../src/operations/delete'; +import { DistinctOperation } from '../../../src/operations/distinct'; +import { DropCollectionOperation, DropDatabaseOperation } from '../../../src/operations/drop'; +import { EstimatedDocumentCountOperation } from '../../../src/operations/estimated_document_count'; +import { FindOperation } from '../../../src/operations/find'; +import { + FindAndModifyOperation, + FindOneAndDeleteOperation, + FindOneAndReplaceOperation, + FindOneAndUpdateOperation +} from 
'../../../src/operations/find_and_modify'; +import { GetMoreOperation } from '../../../src/operations/get_more'; +import { + CreateIndexesOperation, + DropIndexOperation, + ListIndexesOperation +} from '../../../src/operations/indexes'; +import { InsertOneOperation, InsertOperation } from '../../../src/operations/insert'; +import { KillCursorsOperation } from '../../../src/operations/kill_cursors'; +import { ListCollectionsOperation } from '../../../src/operations/list_collections'; +import { ListDatabasesOperation } from '../../../src/operations/list_databases'; +import { ProfilingLevelOperation } from '../../../src/operations/profiling_level'; +import { RemoveUserOperation } from '../../../src/operations/remove_user'; +import { RenameOperation } from '../../../src/operations/rename'; +import { RunCommandOperation } from '../../../src/operations/run_command'; +import { CreateSearchIndexesOperation } from '../../../src/operations/search_indexes/create'; +import { DropSearchIndexOperation } from '../../../src/operations/search_indexes/drop'; +import { UpdateSearchIndexOperation } from '../../../src/operations/search_indexes/update'; +import { SetProfilingLevelOperation } from '../../../src/operations/set_profiling_level'; +import { DbStatsOperation } from '../../../src/operations/stats'; +import { + ReplaceOneOperation, + UpdateManyOperation, + UpdateOneOperation, + UpdateOperation +} from '../../../src/operations/update'; +import { ValidateCollectionOperation } from '../../../src/operations/validate_collection'; +import { TimeoutContext } from '../../../src/timeout'; +import { MongoDBNamespace } from '../../../src/utils'; describe('abstract operation', function () { describe('command name getter', function () { interface AbstractOperationSubclasses { - subclassCreator: () => mongodb.AbstractOperation; + subclassCreator: () => AbstractOperation; subclassType: any; correctCommandName: string; } - let client: mongodb.MongoClient; - let db: mongodb.Db; - let admin: 
mongodb.Admin; - let collection: mongodb.Collection; + let client: MongoClient; + let db: Db; + let admin: Admin; + let collection: Collection; const subclassArray: AbstractOperationSubclasses[] = [ { - subclassCreator: () => - new mongodb.AggregateOperation(collection.fullNamespace, [{ a: 1 }], {}), - subclassType: mongodb.AggregateOperation, + subclassCreator: () => new AggregateOperation(collection.fullNamespace, [{ a: 1 }], {}), + subclassType: AggregateOperation, correctCommandName: 'aggregate' }, { - subclassCreator: () => new mongodb.CountOperation(collection.fullNamespace, { a: 1 }, {}), - subclassType: mongodb.CountOperation, + subclassCreator: () => new CountOperation(collection.fullNamespace, { a: 1 }, {}), + subclassType: CountOperation, correctCommandName: 'count' }, { - subclassCreator: () => new mongodb.CreateCollectionOperation(db, 'name'), - subclassType: mongodb.CreateCollectionOperation, + subclassCreator: () => new CreateCollectionOperation(db, 'name'), + subclassType: CreateCollectionOperation, correctCommandName: 'create' }, { subclassCreator: () => - new mongodb.DeleteOperation(collection.fullNamespace, [{ q: { a: 1 }, limit: 1 }], {}), - subclassType: mongodb.DeleteOperation, + new DeleteOperation(collection.fullNamespace, [{ q: { a: 1 }, limit: 1 }], {}), + subclassType: DeleteOperation, correctCommandName: 'delete' }, { subclassCreator: () => - new mongodb.DeleteOneOperation(collection.fullNamespace, [{ q: { a: 1 }, limit: 1 }], {}), - subclassType: mongodb.DeleteOneOperation, + new DeleteOneOperation(collection.fullNamespace, [{ q: { a: 1 }, limit: 1 }], {}), + subclassType: DeleteOneOperation, correctCommandName: 'delete' }, { subclassCreator: () => - new mongodb.DeleteManyOperation( - collection.fullNamespace, - [{ q: { a: 1 }, limit: 1 }], - {} - ), - subclassType: mongodb.DeleteManyOperation, + new DeleteManyOperation(collection.fullNamespace, [{ q: { a: 1 }, limit: 1 }], {}), + subclassType: DeleteManyOperation, correctCommandName: 
'delete' }, { - subclassCreator: () => new mongodb.DistinctOperation(collection, 'a', { a: 1 }), - subclassType: mongodb.DistinctOperation, + subclassCreator: () => new DistinctOperation(collection, 'a', { a: 1 }), + subclassType: DistinctOperation, correctCommandName: 'distinct' }, { - subclassCreator: () => new mongodb.DropCollectionOperation(db, 'collectionName', {}), - subclassType: mongodb.DropCollectionOperation, + subclassCreator: () => new DropCollectionOperation(db, 'collectionName', {}), + subclassType: DropCollectionOperation, correctCommandName: 'drop' }, { - subclassCreator: () => new mongodb.DropDatabaseOperation(db, {}), - subclassType: mongodb.DropDatabaseOperation, + subclassCreator: () => new DropDatabaseOperation(db, {}), + subclassType: DropDatabaseOperation, correctCommandName: 'dropDatabase' }, { - subclassCreator: () => new mongodb.EstimatedDocumentCountOperation(collection, {}), - subclassType: mongodb.EstimatedDocumentCountOperation, + subclassCreator: () => new EstimatedDocumentCountOperation(collection, {}), + subclassType: EstimatedDocumentCountOperation, correctCommandName: 'count' }, { - subclassCreator: () => new mongodb.FindOperation(collection.fullNamespace), - subclassType: mongodb.FindOperation, + subclassCreator: () => new FindOperation(collection.fullNamespace), + subclassType: FindOperation, correctCommandName: 'find' }, { - subclassCreator: () => new mongodb.FindAndModifyOperation(collection, { a: 1 }, {}), - subclassType: mongodb.FindAndModifyOperation, + subclassCreator: () => new FindAndModifyOperation(collection, { a: 1 }, {}), + subclassType: FindAndModifyOperation, correctCommandName: 'findAndModify' }, { - subclassCreator: () => new mongodb.FindOneAndDeleteOperation(collection, { a: 1 }, {}), - subclassType: mongodb.FindOneAndDeleteOperation, + subclassCreator: () => new FindOneAndDeleteOperation(collection, { a: 1 }, {}), + subclassType: FindOneAndDeleteOperation, correctCommandName: 'findAndModify' }, { - 
subclassCreator: () => - new mongodb.FindOneAndReplaceOperation(collection, { a: 2 }, { a: 1 }, {}), - subclassType: mongodb.FindOneAndReplaceOperation, + subclassCreator: () => new FindOneAndReplaceOperation(collection, { a: 2 }, { a: 1 }, {}), + subclassType: FindOneAndReplaceOperation, correctCommandName: 'findAndModify' }, { - subclassCreator: () => - new mongodb.FindOneAndUpdateOperation(collection, { a: 2 }, { $a: 1 }, {}), - subclassType: mongodb.FindOneAndUpdateOperation, + subclassCreator: () => new FindOneAndUpdateOperation(collection, { a: 2 }, { $a: 1 }, {}), + subclassType: FindOneAndUpdateOperation, correctCommandName: 'findAndModify' }, { subclassCreator: () => - new mongodb.GetMoreOperation( + new GetMoreOperation( collection.fullNamespace, Long.fromNumber(1), - {} as any as mongodb.Server, + {} as any as Server, {} ), - subclassType: mongodb.GetMoreOperation, + subclassType: GetMoreOperation, correctCommandName: 'getMore' }, { subclassCreator: () => - mongodb.CreateIndexesOperation.fromIndexDescriptionArray(db, 'bar', [{ key: { a: 1 } }]), - subclassType: mongodb.CreateIndexesOperation, + CreateIndexesOperation.fromIndexDescriptionArray(db, 'bar', [{ key: { a: 1 } }]), + subclassType: CreateIndexesOperation, correctCommandName: 'createIndexes' }, { - subclassCreator: () => new mongodb.DropIndexOperation(collection, 'a', {}), - subclassType: mongodb.DropIndexOperation, + subclassCreator: () => new DropIndexOperation(collection, 'a', {}), + subclassType: DropIndexOperation, correctCommandName: 'dropIndexes' }, { - subclassCreator: () => new mongodb.ListIndexesOperation(collection, {}), - subclassType: mongodb.ListIndexesOperation, + subclassCreator: () => new ListIndexesOperation(collection, {}), + subclassType: ListIndexesOperation, correctCommandName: 'listIndexes' }, { - subclassCreator: () => - new mongodb.InsertOperation(collection.fullNamespace, [{ a: 1 }], {}), - subclassType: mongodb.InsertOperation, + subclassCreator: () => new 
InsertOperation(collection.fullNamespace, [{ a: 1 }], {}), + subclassType: InsertOperation, correctCommandName: 'insert' }, { - subclassCreator: () => new mongodb.InsertOneOperation(collection, { a: 1 }, {}), - subclassType: mongodb.InsertOneOperation, + subclassCreator: () => new InsertOneOperation(collection, { a: 1 }, {}), + subclassType: InsertOneOperation, correctCommandName: 'insert' }, { subclassCreator: () => - new mongodb.KillCursorsOperation( + new KillCursorsOperation( Long.fromNumber(1), collection.fullNamespace, - {} as any as mongodb.Server, + {} as any as Server, {} ), - subclassType: mongodb.KillCursorsOperation, + subclassType: KillCursorsOperation, correctCommandName: 'killCursors' }, { - subclassCreator: () => new mongodb.ListCollectionsOperation(db, { a: 1 }, {}), - subclassType: mongodb.ListCollectionsOperation, + subclassCreator: () => new ListCollectionsOperation(db, { a: 1 }, {}), + subclassType: ListCollectionsOperation, correctCommandName: 'listCollections' }, { - subclassCreator: () => new mongodb.ListDatabasesOperation(db, {}), - subclassType: mongodb.ListDatabasesOperation, + subclassCreator: () => new ListDatabasesOperation(db, {}), + subclassType: ListDatabasesOperation, correctCommandName: 'listDatabases' }, { - subclassCreator: () => new mongodb.ProfilingLevelOperation(db, {}), - subclassType: mongodb.ProfilingLevelOperation, + subclassCreator: () => new ProfilingLevelOperation(db, {}), + subclassType: ProfilingLevelOperation, correctCommandName: 'profile' }, { - subclassCreator: () => new mongodb.RemoveUserOperation(db, 'userToDrop', {}), - subclassType: mongodb.RemoveUserOperation, + subclassCreator: () => new RemoveUserOperation(db, 'userToDrop', {}), + subclassType: RemoveUserOperation, correctCommandName: 'dropUser' }, { - subclassCreator: () => new mongodb.RenameOperation(collection, 'newName', {}), - subclassType: mongodb.RenameOperation, + subclassCreator: () => new RenameOperation(collection, 'newName', {}), + subclassType: 
RenameOperation, correctCommandName: 'renameCollection' }, { subclassCreator: () => - new mongodb.RunCommandOperation( - new mongodb.MongoDBNamespace('foo', 'bar'), + new RunCommandOperation( + new MongoDBNamespace('foo', 'bar'), { dummyCommand: 'dummyCommand' }, {} ), - subclassType: mongodb.RunCommandOperation, + subclassType: RunCommandOperation, correctCommandName: 'runCommand' }, { subclassCreator: () => - new mongodb.CreateSearchIndexesOperation(collection, [{ definition: { a: 1 } }]), - subclassType: mongodb.CreateSearchIndexesOperation, + new CreateSearchIndexesOperation(collection, [{ definition: { a: 1 } }]), + subclassType: CreateSearchIndexesOperation, correctCommandName: 'createSearchIndexes' }, { - subclassCreator: () => new mongodb.DropSearchIndexOperation(collection, 'dummyName'), - subclassType: mongodb.DropSearchIndexOperation, + subclassCreator: () => new DropSearchIndexOperation(collection, 'dummyName'), + subclassType: DropSearchIndexOperation, correctCommandName: 'dropSearchIndex' }, { subclassCreator: () => - new mongodb.UpdateSearchIndexOperation(collection, 'dummyName', { + new UpdateSearchIndexOperation(collection, 'dummyName', { a: 1 }), - subclassType: mongodb.UpdateSearchIndexOperation, + subclassType: UpdateSearchIndexOperation, correctCommandName: 'updateSearchIndex' }, { - subclassCreator: () => new mongodb.SetProfilingLevelOperation(db, 'all', {}), - subclassType: mongodb.SetProfilingLevelOperation, + subclassCreator: () => new SetProfilingLevelOperation(db, 'all', {}), + subclassType: SetProfilingLevelOperation, correctCommandName: 'profile' }, { - subclassCreator: () => new mongodb.DbStatsOperation(db, {}), - subclassType: mongodb.DbStatsOperation, + subclassCreator: () => new DbStatsOperation(db, {}), + subclassType: DbStatsOperation, correctCommandName: 'dbStats' }, { subclassCreator: () => - new mongodb.UpdateOperation( - collection.fullNamespace, - [{ q: { a: 1 }, u: { $a: 2 } }], - {} - ), - subclassType: 
mongodb.UpdateOperation, + new UpdateOperation(collection.fullNamespace, [{ q: { a: 1 }, u: { $a: 2 } }], {}), + subclassType: UpdateOperation, correctCommandName: 'update' }, { subclassCreator: () => - new mongodb.UpdateOneOperation(collection.fullNamespace, { a: 1 }, { $a: 2 }, {}), - subclassType: mongodb.UpdateOneOperation, + new UpdateOneOperation(collection.fullNamespace, { a: 1 }, { $a: 2 }, {}), + subclassType: UpdateOneOperation, correctCommandName: 'update' }, { subclassCreator: () => - new mongodb.UpdateManyOperation(collection.fullNamespace, { a: 1 }, { $a: 2 }, {}), - subclassType: mongodb.UpdateManyOperation, + new UpdateManyOperation(collection.fullNamespace, { a: 1 }, { $a: 2 }, {}), + subclassType: UpdateManyOperation, correctCommandName: 'update' }, { subclassCreator: () => - new mongodb.ReplaceOneOperation(collection.fullNamespace, { a: 1 }, { b: 1 }, {}), - subclassType: mongodb.ReplaceOneOperation, + new ReplaceOneOperation(collection.fullNamespace, { a: 1 }, { b: 1 }, {}), + subclassType: ReplaceOneOperation, correctCommandName: 'update' }, { - subclassCreator: () => new mongodb.ValidateCollectionOperation(admin, 'bar', {}), - subclassType: mongodb.ValidateCollectionOperation, + subclassCreator: () => new ValidateCollectionOperation(admin, 'bar', {}), + subclassType: ValidateCollectionOperation, correctCommandName: 'validate' } ]; @@ -269,7 +310,7 @@ describe('abstract operation', function () { expect(subclassInstance.commandName).to.equal(correctCommandName); }); - if (subclassType !== mongodb.RunCommandOperation) { + if (subclassType !== RunCommandOperation) { it( `operation.commandName is a key in the command document`, { @@ -278,7 +319,7 @@ describe('abstract operation', function () { async function () { const session = client.startSession(); const pool = Array.from(client.topology.s.servers.values())[0].pool; - const timeoutContext = mongodb.TimeoutContext.create({ + const timeoutContext = TimeoutContext.create({ waitQueueTimeoutMS: 1000, 
serverSelectionTimeoutMS: 1000 }); diff --git a/test/integration/crud/client_bulk_write.test.ts b/test/integration/crud/client_bulk_write.test.ts index a502c88318b..247d4683b87 100644 --- a/test/integration/crud/client_bulk_write.test.ts +++ b/test/integration/crud/client_bulk_write.test.ts @@ -6,10 +6,10 @@ import { type Connection, type ConnectionPool, type MongoClient, - MongoOperationTimeoutError, - now, - TimeoutContext -} from '../../mongodb'; + MongoOperationTimeoutError +} from '../../../src'; +import { TimeoutContext } from '../../../src/timeout'; +import { now } from '../../../src/utils'; import { clearFailPoint, configureFailPoint, diff --git a/test/integration/crud/crud.prose.test.ts b/test/integration/crud/crud.prose.test.ts index 79cedead66e..67241f58031 100644 --- a/test/integration/crud/crud.prose.test.ts +++ b/test/integration/crud/crud.prose.test.ts @@ -1,18 +1,18 @@ import { expect } from 'chai'; import { once } from 'events'; -import { type CommandStartedEvent } from '../../../mongodb'; import { type ClientBulkWriteModel, type ClientSession, type Collection, + type CommandStartedEvent, type Document, MongoBulkWriteError, type MongoClient, MongoClientBulkWriteError, MongoInvalidArgumentError, MongoServerError -} from '../../mongodb'; +} from '../../../src'; import { getEncryptExtraOptions } from '../../tools/utils'; import { filterForCommands } from '../shared'; diff --git a/test/integration/crud/explain.test.ts b/test/integration/crud/explain.test.ts index 8236cbf00ee..bc6783d04a4 100644 --- a/test/integration/crud/explain.test.ts +++ b/test/integration/crud/explain.test.ts @@ -9,7 +9,7 @@ import { type MongoClient, MongoOperationTimeoutError, MongoServerError -} from '../../mongodb'; +} from '../../../src'; import { clearFailPoint, configureFailPoint, measureDuration } from '../../tools/utils'; import { filterForCommands } from '../shared'; diff --git a/test/integration/crud/find_and_modify.test.ts 
b/test/integration/crud/find_and_modify.test.ts index 524ad7b06a5..85e1026b5dd 100644 --- a/test/integration/crud/find_and_modify.test.ts +++ b/test/integration/crud/find_and_modify.test.ts @@ -6,7 +6,7 @@ import { type MongoClient, MongoServerError, ObjectId -} from '../../mongodb'; +} from '../../../src'; import { setupDatabase } from '../shared'; describe('Collection (#findOneAnd...)', function () { diff --git a/test/integration/crud/maxTimeMS.test.ts b/test/integration/crud/maxTimeMS.test.ts index f4d83ddc2f3..62052b544cf 100644 --- a/test/integration/crud/maxTimeMS.test.ts +++ b/test/integration/crud/maxTimeMS.test.ts @@ -8,7 +8,7 @@ import { type MongoClient, MongoCursorExhaustedError, MongoServerError -} from '../../mongodb'; +} from '../../../src'; describe('MaxTimeMS', function () { let client: MongoClient; From ac125a43e8d99c6a1d7d716b485842bb9338debd Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 18:59:35 +0200 Subject: [PATCH 23/41] test(NODE-7179): migrate test/integration/enumerate_databases.test.ts --- test/integration/enumerate_databases.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/enumerate_databases.test.ts b/test/integration/enumerate_databases.test.ts index 0b52ade1303..37bc84fe65e 100644 --- a/test/integration/enumerate_databases.test.ts +++ b/test/integration/enumerate_databases.test.ts @@ -1,7 +1,7 @@ import { expect } from 'chai'; import { once } from 'events'; -import { type MongoClient, MongoServerError } from '../mongodb'; +import { type MongoClient, MongoServerError } from '../../src'; import { TestBuilder, UnifiedTestSuiteBuilder } from '../tools/unified_suite_builder'; const metadata: MongoDBMetadataUI = { From bee9e2fdf2baebf8e408947af8898dadb80bda5c Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 19:44:54 +0200 Subject: [PATCH 24/41] test(NODE-7179): migrate test/integration/enumerate_databases.prose.test.ts --- 
test/integration/enumerate_databases.prose.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/enumerate_databases.prose.test.ts b/test/integration/enumerate_databases.prose.test.ts index 9528d68fada..4d3bad1e649 100644 --- a/test/integration/enumerate_databases.prose.test.ts +++ b/test/integration/enumerate_databases.prose.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import type { MongoClient } from '../mongodb'; +import type { MongoClient } from '../../src'; const REQUIRED_DBS = ['admin', 'local', 'config']; const DB_NAME = 'listDatabasesTest'; From 3df73dd0bdc04070b281246940d072d854596402 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 20:01:07 +0200 Subject: [PATCH 25/41] test(NODE-7179): migrate test/integration/node-specific/* --- .../node-specific/abort_signal.test.ts | 13 +++++----- .../node-specific/abstract_cursor.test.ts | 9 ++++--- .../node-specific/auto_connect.test.ts | 4 ++-- .../node-specific/auto_encrypter.test.ts | 4 ++-- .../node-specific/bson-options/raw.test.ts | 2 +- .../bson-options/use_bigint_64.test.ts | 24 +++++++++---------- .../bson-options/utf8_validation.test.ts | 19 ++++----------- .../node-specific/client_close.test.ts | 4 ++-- .../node-specific/client_encryption.test.ts | 2 +- .../comment_with_falsy_values.test.ts | 2 +- .../convert_socket_errors.test.ts | 3 ++- .../node-specific/crypt_shared_lib.test.ts | 4 +--- test/integration/node-specific/errors.test.ts | 2 +- test/integration/node-specific/ipv6.test.ts | 2 +- .../node-specific/topology.test.ts | 2 +- .../node-specific/validate_collection.test.ts | 2 +- 16 files changed, 43 insertions(+), 55 deletions(-) diff --git a/test/integration/node-specific/abort_signal.test.ts b/test/integration/node-specific/abort_signal.test.ts index 6cec61ec391..db26f80ecc1 100644 --- a/test/integration/node-specific/abort_signal.test.ts +++ b/test/integration/node-specific/abort_signal.test.ts @@ -10,19 +10,18 @@ import { 
AggregationCursor, Code, type Collection, - Connection, - ConnectionPool, type Db, FindCursor, ListCollectionsCursor, type Log, type MongoClient, MongoServerError, - promiseWithResolvers, - ReadPreference, - setDifference, - StateMachine -} from '../../mongodb'; + ReadPreference +} from '../../../src'; +import { StateMachine } from '../../../src/client-side-encryption/state_machine'; +import { Connection } from '../../../src/cmap/connection'; +import { ConnectionPool } from '../../../src/cmap/connection_pool'; +import { promiseWithResolvers, setDifference } from '../../../src/utils'; import { clearFailPoint, configureFailPoint, diff --git a/test/integration/node-specific/abstract_cursor.test.ts b/test/integration/node-specific/abstract_cursor.test.ts index 3a39cfe03e5..7a5e7151f23 100644 --- a/test/integration/node-specific/abstract_cursor.test.ts +++ b/test/integration/node-specific/abstract_cursor.test.ts @@ -7,16 +7,15 @@ import { AbstractCursor, type Collection, type CommandStartedEvent, - CSOTTimeoutContext, - CursorTimeoutContext, CursorTimeoutMode, type FindCursor, MongoAPIError, type MongoClient, MongoCursorExhaustedError, - MongoOperationTimeoutError, - TimeoutContext -} from '../../mongodb'; + MongoOperationTimeoutError +} from '../../../src'; +import { CursorTimeoutContext } from '../../../src/cursor/abstract_cursor'; +import { CSOTTimeoutContext, TimeoutContext } from '../../../src/timeout'; import { clearFailPoint, configureFailPoint } from '../../tools/utils'; import { filterForCommands } from '../shared'; diff --git a/test/integration/node-specific/auto_connect.test.ts b/test/integration/node-specific/auto_connect.test.ts index f0850049632..42bf6ba947c 100644 --- a/test/integration/node-specific/auto_connect.test.ts +++ b/test/integration/node-specific/auto_connect.test.ts @@ -10,9 +10,9 @@ import { MongoClient, MongoNotConnectedError, ProfilingLevel, - Topology, TopologyType -} from '../../mongodb'; +} from '../../../src'; +import { Topology } from 
'../../../src/sdam/topology'; import { sleep } from '../../tools/utils'; describe('When executing an operation for the first time', () => { diff --git a/test/integration/node-specific/auto_encrypter.test.ts b/test/integration/node-specific/auto_encrypter.test.ts index 4dbbbfd8c72..3e80ad5fb0c 100644 --- a/test/integration/node-specific/auto_encrypter.test.ts +++ b/test/integration/node-specific/auto_encrypter.test.ts @@ -7,9 +7,9 @@ import { type MongoClient, MongoNetworkTimeoutError, MongoRuntimeError, - StateMachine, type UUID -} from '../../mongodb'; +} from '../../../src'; +import { StateMachine } from '../../../src/client-side-encryption/state_machine'; describe('mongocryptd auto spawn', function () { let client: MongoClient; diff --git a/test/integration/node-specific/bson-options/raw.test.ts b/test/integration/node-specific/bson-options/raw.test.ts index 455344f9a92..33ace0b1d6e 100644 --- a/test/integration/node-specific/bson-options/raw.test.ts +++ b/test/integration/node-specific/bson-options/raw.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { type Collection, type MongoClient, ObjectId } from '../../../mongodb'; +import { type Collection, type MongoClient, ObjectId } from '../../../../src'; describe('raw bson support', () => { describe('raw', () => { diff --git a/test/integration/node-specific/bson-options/use_bigint_64.test.ts b/test/integration/node-specific/bson-options/use_bigint_64.test.ts index 6c6714351ab..d0c981f0af2 100644 --- a/test/integration/node-specific/bson-options/use_bigint_64.test.ts +++ b/test/integration/node-specific/bson-options/use_bigint_64.test.ts @@ -1,13 +1,13 @@ import { expect } from 'chai'; import { - BSON, type Collection, type Db, MongoAPIError, type MongoClient, type WithId -} from '../../../mongodb'; +} from '../../../../src'; +import { BSONError, type Document, Long } from '../../../../src/bson'; describe('useBigInt64 option', function () { let client: MongoClient; @@ -85,7 +85,7 @@ 
describe('useBigInt64 option', function () { }); describe('when set to true at collection level', function () { - let res: WithId | null; + let res: WithId | null; beforeEach(async function () { client = await this.configuration.newClient().connect(); @@ -104,7 +104,7 @@ describe('useBigInt64 option', function () { }); describe('when set to false at collection level', function () { - let res: WithId | null; + let res: WithId | null; beforeEach(async function () { client = await this.configuration.newClient().connect(); @@ -123,7 +123,7 @@ describe('useBigInt64 option', function () { }); describe('when set to true', function () { - let res: WithId | null; + let res: WithId | null; beforeEach(async function () { client = await this.configuration.newClient({}, { useBigInt64: true }).connect(); @@ -132,7 +132,7 @@ describe('useBigInt64 option', function () { await db.dropCollection('useBigInt64Test'); coll = await db.createCollection('useBigInt64Test'); - await coll.insertOne({ a: new BSON.Long(1) }); + await coll.insertOne({ a: new Long(1) }); res = await coll.findOne({ a: 1n }); }); @@ -177,7 +177,7 @@ describe('useBigInt64 option', function () { .listCollections() .toArray() .catch(e => e); - expect(e).to.be.instanceOf(BSON.BSONError); + expect(e).to.be.instanceOf(BSONError); }); }); @@ -194,7 +194,7 @@ describe('useBigInt64 option', function () { .insertOne({ name: 'bailey ' }) .then(() => null) .catch(e => e); - expect(e).to.be.instanceOf(BSON.BSONError); + expect(e).to.be.instanceOf(BSONError); }); }); @@ -211,7 +211,7 @@ describe('useBigInt64 option', function () { .insertOne({ a: 10n }, { promoteLongs: false, useBigInt64: true }) .catch(e => e); - expect(e).to.be.instanceOf(BSON.BSONError); + expect(e).to.be.instanceOf(BSONError); }); }); }); @@ -249,7 +249,7 @@ describe('useBigInt64 option', function () { .listCollections() .toArray() .catch(e => e); - expect(e).to.be.instanceOf(BSON.BSONError); + expect(e).to.be.instanceOf(BSONError); }); }); @@ -266,7 +266,7 
@@ describe('useBigInt64 option', function () { .insertOne({ name: 'bailey ' }) .then(() => null) .catch(e => e); - expect(e).to.be.instanceOf(BSON.BSONError); + expect(e).to.be.instanceOf(BSONError); }); }); @@ -282,7 +282,7 @@ describe('useBigInt64 option', function () { .insertOne({ a: 10n }, { promoteValues: false, useBigInt64: true }) .catch(e => e); - expect(e).to.be.instanceOf(BSON.BSONError); + expect(e).to.be.instanceOf(BSONError); }); }); }); diff --git a/test/integration/node-specific/bson-options/utf8_validation.test.ts b/test/integration/node-specific/bson-options/utf8_validation.test.ts index 2586b28ed2d..84a230478a8 100644 --- a/test/integration/node-specific/bson-options/utf8_validation.test.ts +++ b/test/integration/node-specific/bson-options/utf8_validation.test.ts @@ -2,14 +2,9 @@ import { expect } from 'chai'; import * as net from 'net'; import * as sinon from 'sinon'; -import { - BSON, - BSONError, - type Collection, - type MongoClient, - MongoServerError, - OpMsgResponse -} from '../../../mongodb'; +import { type Collection, type MongoClient, MongoServerError } from '../../../../src'; +import { BSONError, deserialize } from '../../../../src/bson'; +import { OpMsgResponse } from '../../../../src/cmap/commands'; describe('class MongoDBResponse', () => { let client; @@ -46,12 +41,8 @@ describe('class MongoDBResponse', () => { // Check that the server sent us broken BSON (bad UTF) expect(() => { - BSON.deserialize(spy.returnValues[0], { validation: { utf8: true } }); - }).to.throw( - BSON.BSONError, - /Invalid UTF/i, - 'did not generate error with invalid utf8' - ); + deserialize(spy.returnValues[0], { validation: { utf8: true } }); + }).to.throw(BSONError, /Invalid UTF/i, 'did not generate error with invalid utf8'); } await generateWriteErrorWithInvalidUtf8(); diff --git a/test/integration/node-specific/client_close.test.ts b/test/integration/node-specific/client_close.test.ts index b0179aab60e..ad5bd9cebcb 100644 --- 
a/test/integration/node-specific/client_close.test.ts +++ b/test/integration/node-specific/client_close.test.ts @@ -2,13 +2,13 @@ import * as events from 'node:events'; import { expect } from 'chai'; -import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; import { type Collection, type CommandStartedEvent, type FindCursor, type MongoClient -} from '../../mongodb'; +} from '../../../src'; +import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; import { configureMongocryptdSpawnHooks } from '../../tools/utils'; import { filterForCommands } from '../shared'; import { runScriptAndGetProcessInfo } from './resource_tracking_script_builder'; diff --git a/test/integration/node-specific/client_encryption.test.ts b/test/integration/node-specific/client_encryption.test.ts index 51f7b9f650f..0fcee739c53 100644 --- a/test/integration/node-specific/client_encryption.test.ts +++ b/test/integration/node-specific/client_encryption.test.ts @@ -3,13 +3,13 @@ import { readFileSync } from 'fs'; import * as sinon from 'sinon'; import { MongoCryptError } from '../../../src'; +import { Binary, type Collection, Int32, Long, type MongoClient, UUID } from '../../../src'; import { ClientEncryption, type DataKey } from '../../../src/client-side-encryption/client_encryption'; import { MongoCryptInvalidArgumentError } from '../../../src/client-side-encryption/errors'; import { StateMachine } from '../../../src/client-side-encryption/state_machine'; -import { Binary, type Collection, Int32, Long, type MongoClient, UUID } from '../../mongodb'; function readHttpResponse(path) { let data = readFileSync(path, 'utf8').toString(); diff --git a/test/integration/node-specific/comment_with_falsy_values.test.ts b/test/integration/node-specific/comment_with_falsy_values.test.ts index 6d0819fd33b..6be4ba54965 100644 --- a/test/integration/node-specific/comment_with_falsy_values.test.ts +++ b/test/integration/node-specific/comment_with_falsy_values.test.ts @@ -1,6 +1,6 @@ import { expect 
} from 'chai'; -import { type Collection, type CommandStartedEvent, Long, type MongoClient } from '../../mongodb'; +import { type Collection, type CommandStartedEvent, Long, type MongoClient } from '../../../src'; import { TestBuilder, UnifiedTestSuiteBuilder } from '../../tools/unified_suite_builder'; const falsyValues = [0, false, '', Long.ZERO, null, NaN] as const; diff --git a/test/integration/node-specific/convert_socket_errors.test.ts b/test/integration/node-specific/convert_socket_errors.test.ts index 0f3508632ea..e3959023b70 100644 --- a/test/integration/node-specific/convert_socket_errors.test.ts +++ b/test/integration/node-specific/convert_socket_errors.test.ts @@ -3,7 +3,8 @@ import { Duplex } from 'node:stream'; import { expect } from 'chai'; import * as sinon from 'sinon'; -import { Connection, type MongoClient, MongoNetworkError, ns } from '../../mongodb'; +import { Connection, type MongoClient, MongoNetworkError } from '../../../src'; +import { ns } from '../../../src/utils'; import { clearFailPoint, configureFailPoint } from '../../tools/utils'; describe('Socket Errors', () => { diff --git a/test/integration/node-specific/crypt_shared_lib.test.ts b/test/integration/node-specific/crypt_shared_lib.test.ts index a3d9412aa84..b7954ae5963 100644 --- a/test/integration/node-specific/crypt_shared_lib.test.ts +++ b/test/integration/node-specific/crypt_shared_lib.test.ts @@ -2,11 +2,9 @@ import { expect } from 'chai'; import { spawnSync } from 'child_process'; import { dirname } from 'path'; -import { BSON } from '../../mongodb'; +import { EJSON } from '../../../src/bson'; import { getEncryptExtraOptions } from '../../tools/utils'; -const { EJSON } = BSON; - describe('crypt shared library', () => { it('should fail if no library can be found in the search path and cryptSharedLibRequired is set', async function () { const env = { diff --git a/test/integration/node-specific/errors.test.ts b/test/integration/node-specific/errors.test.ts index 
ff00db65647..83edcff62dd 100644 --- a/test/integration/node-specific/errors.test.ts +++ b/test/integration/node-specific/errors.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { MongoClient, MongoServerSelectionError, ReadPreference } from '../../mongodb'; +import { MongoClient, MongoServerSelectionError, ReadPreference } from '../../../src'; describe('Error (Integration)', function () { it('NODE-5296: handles aggregate errors from dns lookup', async function () { diff --git a/test/integration/node-specific/ipv6.test.ts b/test/integration/node-specific/ipv6.test.ts index b030fdb210c..b3b76c85176 100644 --- a/test/integration/node-specific/ipv6.test.ts +++ b/test/integration/node-specific/ipv6.test.ts @@ -8,7 +8,7 @@ import { type MongoClient, ReadPreference, TopologyType -} from '../../mongodb'; +} from '../../../src'; describe('IPv6 Addresses', () => { let client: MongoClient; diff --git a/test/integration/node-specific/topology.test.ts b/test/integration/node-specific/topology.test.ts index 6ebf45bfeba..cc35303c0f5 100644 --- a/test/integration/node-specific/topology.test.ts +++ b/test/integration/node-specific/topology.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { MongoClient, type MongoClientOptions, type Topology } from '../../mongodb'; +import { MongoClient, type MongoClientOptions, type Topology } from '../../../src'; describe('Topology', function () { it('should correctly track states of a topology', { diff --git a/test/integration/node-specific/validate_collection.test.ts b/test/integration/node-specific/validate_collection.test.ts index b19f19e708a..d1c4ac790ff 100644 --- a/test/integration/node-specific/validate_collection.test.ts +++ b/test/integration/node-specific/validate_collection.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { ValidateCollectionOperation } from '../../mongodb'; +import { ValidateCollectionOperation } from '../../../src/operations/validate_collection'; 
describe('ValidateCollectionOperation', function () { let client; From c99c7e813579123e8e4199c3dc39ee3b4eb5fce9 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 20:04:15 +0200 Subject: [PATCH 26/41] test(NODE-7179): migrate test/integration/auth/mongodb_aws.prose.test.ts --- test/integration/auth/mongodb_aws.prose.test.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/auth/mongodb_aws.prose.test.ts b/test/integration/auth/mongodb_aws.prose.test.ts index d1327c276d1..126a0a19a68 100644 --- a/test/integration/auth/mongodb_aws.prose.test.ts +++ b/test/integration/auth/mongodb_aws.prose.test.ts @@ -2,7 +2,8 @@ import * as process from 'node:process'; import { expect } from 'chai'; -import { AWSSDKCredentialProvider, type MongoClient, MongoServerError } from '../../mongodb'; +import { type MongoClient, MongoServerError } from '../../../src'; +import { AWSSDKCredentialProvider } from '../../../src/cmap/auth/aws_temporary_credentials'; const isMongoDBAWSAuthEnvironment = (process.env.MONGODB_URI ?? 
'').includes('MONGODB-AWS'); From e71265d42242355760ba073487e6a5cc4bddb28e Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 20:07:11 +0200 Subject: [PATCH 27/41] test(NODE-7179): migrate test/integration/server-selection/* --- test/integration/server-selection/operation_count.test.ts | 8 ++------ .../server_selection.prose.operation_count.test.ts | 8 ++------ ...server_selection.prose.sharded_retryable_reads.test.ts | 2 +- ...erver_selection.prose.sharded_retryable_writes.test.ts | 2 +- 4 files changed, 6 insertions(+), 14 deletions(-) diff --git a/test/integration/server-selection/operation_count.test.ts b/test/integration/server-selection/operation_count.test.ts index 6f6a68f10a9..6a74bfe9984 100644 --- a/test/integration/server-selection/operation_count.test.ts +++ b/test/integration/server-selection/operation_count.test.ts @@ -1,12 +1,8 @@ import { expect } from 'chai'; import * as sinon from 'sinon'; -import { - type AbstractCursor, - type Collection, - ConnectionPool, - type MongoClient -} from '../../mongodb'; +import { type AbstractCursor, type Collection, type MongoClient } from '../../../src'; +import { ConnectionPool } from '../../../src/cmap/connection_pool'; import { type FailCommandFailPoint } from '../../tools/utils'; const testMetadata: MongoDBMetadataUI = { diff --git a/test/integration/server-selection/server_selection.prose.operation_count.test.ts b/test/integration/server-selection/server_selection.prose.operation_count.test.ts index b4a7d9bf47b..fc8da79bdb9 100644 --- a/test/integration/server-selection/server_selection.prose.operation_count.test.ts +++ b/test/integration/server-selection/server_selection.prose.operation_count.test.ts @@ -1,11 +1,7 @@ import { expect } from 'chai'; -import { - type Collection, - type CommandStartedEvent, - HostAddress, - type MongoClient -} from '../../mongodb'; +import { type Collection, type CommandStartedEvent, type MongoClient } from '../../../src'; +import { HostAddress } from 
'../../../src/utils'; import { waitUntilPoolsFilled } from '../../tools/utils'; const failPoint = { diff --git a/test/integration/server-selection/server_selection.prose.sharded_retryable_reads.test.ts b/test/integration/server-selection/server_selection.prose.sharded_retryable_reads.test.ts index 3b72fe93eb1..c70148e7441 100644 --- a/test/integration/server-selection/server_selection.prose.sharded_retryable_reads.test.ts +++ b/test/integration/server-selection/server_selection.prose.sharded_retryable_reads.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import type { CommandFailedEvent, CommandSucceededEvent } from '../../mongodb'; +import type { CommandFailedEvent, CommandSucceededEvent } from '../../../src'; const TEST_METADATA = { requires: { mongodb: '>=4.2.9', topology: 'sharded' } }; const FAIL_COMMAND = { diff --git a/test/integration/server-selection/server_selection.prose.sharded_retryable_writes.test.ts b/test/integration/server-selection/server_selection.prose.sharded_retryable_writes.test.ts index 8b1ea113df4..9550b3dcefc 100644 --- a/test/integration/server-selection/server_selection.prose.sharded_retryable_writes.test.ts +++ b/test/integration/server-selection/server_selection.prose.sharded_retryable_writes.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import type { CommandFailedEvent, CommandSucceededEvent } from '../../mongodb'; +import type { CommandFailedEvent, CommandSucceededEvent } from '../../../src'; const TEST_METADATA = { requires: { mongodb: '>=4.3.1', topology: 'sharded' } }; const FAIL_COMMAND = { From 191a2d2e66ad86aff2f34f12d66757f7c4a12927 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 20:07:53 +0200 Subject: [PATCH 28/41] test(NODE-7179): migrate test/integration/retryable-reads/* --- .../retryable-reads/retryable_reads.spec.prose.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/retryable-reads/retryable_reads.spec.prose.test.ts 
b/test/integration/retryable-reads/retryable_reads.spec.prose.test.ts index ca0576d218a..65791c2ed68 100644 --- a/test/integration/retryable-reads/retryable_reads.spec.prose.test.ts +++ b/test/integration/retryable-reads/retryable_reads.spec.prose.test.ts @@ -1,7 +1,7 @@ /* eslint-disable @typescript-eslint/no-non-null-assertion */ import { expect } from 'chai'; -import { type Collection, type MongoClient } from '../../mongodb'; +import { type Collection, type MongoClient } from '../../../src'; describe('Retryable Reads Spec Prose', () => { let client: MongoClient, failPointName; From f1b4d8cd4a01b5e7b4ffee8daec883f00f6934c8 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 20:08:37 +0200 Subject: [PATCH 29/41] test(NODE-7179): migrate test/integration/connection-monitoring-and-pooling/* --- .../connection_pool.test.ts | 2 +- .../connection-monitoring-and-pooling/rtt_pinger.test.ts | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/test/integration/connection-monitoring-and-pooling/connection_pool.test.ts b/test/integration/connection-monitoring-and-pooling/connection_pool.test.ts index d073b5a1125..d478b2bac1e 100644 --- a/test/integration/connection-monitoring-and-pooling/connection_pool.test.ts +++ b/test/integration/connection-monitoring-and-pooling/connection_pool.test.ts @@ -8,7 +8,7 @@ import { type Db, type MongoClient, type Server -} from '../../mongodb'; +} from '../../../src'; import { clearFailPoint, configureFailPoint, sleep } from '../../tools/utils'; describe('Connection Pool', function () { diff --git a/test/integration/connection-monitoring-and-pooling/rtt_pinger.test.ts b/test/integration/connection-monitoring-and-pooling/rtt_pinger.test.ts index 5b659290ef6..471a6cddf54 100644 --- a/test/integration/connection-monitoring-and-pooling/rtt_pinger.test.ts +++ b/test/integration/connection-monitoring-and-pooling/rtt_pinger.test.ts @@ -2,12 +2,8 @@ import { expect } from 'chai'; import * as semver from 
'semver'; import * as sinon from 'sinon'; -import { - type Connection, - LEGACY_HELLO_COMMAND, - type MongoClient, - type RTTPinger -} from '../../mongodb'; +import { type Connection, type MongoClient, type RTTPinger } from '../../../src'; +import { LEGACY_HELLO_COMMAND } from '../../../src/constants'; import { sleep } from '../../tools/utils'; /** From ef2932c99a7aa8dab5799b512cd203a98674cce8 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 20:09:53 +0200 Subject: [PATCH 30/41] test(NODE-7179): migrate test/integration/client-side-operations-timeout/* --- .../client_side_operations_timeout.prose.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 52d77487b2b..e8736ac3b65 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -7,10 +7,10 @@ import * as semver from 'semver'; import * as sinon from 'sinon'; import { pipeline } from 'stream/promises'; -import { type CommandStartedEvent } from '../../../mongodb'; import { Binary, ClientEncryption, + type CommandStartedEvent, type CommandSucceededEvent, GridFSBucket, MongoBulkWriteError, From 48bbfc8452067b7e104a1fc89df768ef26367b18 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Thu, 16 Oct 2025 20:22:24 +0200 Subject: [PATCH 31/41] test(NODE-7179): fix invalid imports --- ...encryption.prose.26.custom_aws_credential_providers.test.ts | 3 ++- .../client_side_encryption.prose.27.text_queries.test.ts | 3 ++- test/integration/client-side-encryption/driver.test.ts | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git 
a/test/integration/client-side-encryption/client_side_encryption.prose.26.custom_aws_credential_providers.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.26.custom_aws_credential_providers.test.ts index c95f3733acc..a7351c96fb9 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.26.custom_aws_credential_providers.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.26.custom_aws_credential_providers.test.ts @@ -1,7 +1,8 @@ import { expect } from 'chai'; -import { AWSSDKCredentialProvider, Binary, MongoClient } from '../../../src'; +import { Binary, MongoClient } from '../../../src'; import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; +import { AWSSDKCredentialProvider } from '../../../src/cmap/auth/aws_temporary_credentials'; import { getEncryptExtraOptions } from '../../tools/utils'; const metadata: MongoDBMetadataUI = { diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.27.text_queries.test.ts b/test/integration/client-side-encryption/client_side_encryption.prose.27.text_queries.test.ts index 0aef976f8b6..d942fd74242 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.27.text_queries.test.ts +++ b/test/integration/client-side-encryption/client_side_encryption.prose.27.text_queries.test.ts @@ -4,7 +4,8 @@ import { join } from 'node:path'; import { type Binary, type Document, EJSON } from 'bson'; import { expect } from 'chai'; -import { ClientEncryption, type MongoClient, MongoDBCollectionNamespace } from '../../../src'; +import { ClientEncryption, type MongoClient } from '../../../src'; +import { MongoDBCollectionNamespace } from '../../../src/utils'; import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; const metadata: MongoDBMetadataUI = { diff --git a/test/integration/client-side-encryption/driver.test.ts b/test/integration/client-side-encryption/driver.test.ts 
index 8e0500733ca..dfae670e182 100644 --- a/test/integration/client-side-encryption/driver.test.ts +++ b/test/integration/client-side-encryption/driver.test.ts @@ -10,7 +10,6 @@ import { BSON, type Collection, type CommandStartedEvent, - Connection, type MongoClient, MongoCryptCreateDataKeyError, MongoCryptCreateEncryptedCollectionError, @@ -18,6 +17,7 @@ import { } from '../../../src'; import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; import { StateMachine } from '../../../src/client-side-encryption/state_machine'; +import { Connection } from '../../../src/cmap/connection'; import { CSOTTimeoutContext, TimeoutContext } from '../../../src/timeout'; import { resolveTimeoutOptions } from '../../../src/utils'; import { getCSFLEKMSProviders } from '../../csfle-kms-providers'; From 718b90c42c46996db5f2b3596ae97441fabaff43 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Fri, 17 Oct 2025 10:39:38 +0200 Subject: [PATCH 32/41] test(NODE-7179): fix duplicated import --- .../node-specific/client_encryption.test.ts | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/test/integration/node-specific/client_encryption.test.ts b/test/integration/node-specific/client_encryption.test.ts index 0fcee739c53..fb9743198b0 100644 --- a/test/integration/node-specific/client_encryption.test.ts +++ b/test/integration/node-specific/client_encryption.test.ts @@ -2,8 +2,15 @@ import { expect } from 'chai'; import { readFileSync } from 'fs'; import * as sinon from 'sinon'; -import { MongoCryptError } from '../../../src'; -import { Binary, type Collection, Int32, Long, type MongoClient, UUID } from '../../../src'; +import { + Binary, + type Collection, + Int32, + Long, + type MongoClient, + MongoCryptError, + UUID +} from '../../../src'; import { ClientEncryption, type DataKey From 946ffb14d93f73109364dca5ff1425fe0f222354 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Fri, 17 Oct 2025 11:15:17 +0200 Subject: [PATCH 33/41] 
test(NODE-7179): cleanup imports --- .../change-streams/change_streams.prose.test.ts | 8 ++++---- .../node-specific/examples/transactions.test.js | 2 +- test/integration/node-specific/examples/versioned_api.js | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/integration/change-streams/change_streams.prose.test.ts b/test/integration/change-streams/change_streams.prose.test.ts index d13acc73d09..7a876c0d88e 100644 --- a/test/integration/change-streams/change_streams.prose.test.ts +++ b/test/integration/change-streams/change_streams.prose.test.ts @@ -10,13 +10,13 @@ import { type CommandStartedEvent, type CommandSucceededEvent, type Document, - LEGACY_HELLO_COMMAND, Long, type MongoClient, MongoNetworkError, ObjectId, Timestamp -} from '../../mongodb'; +} from '../../../src'; +import { LEGACY_HELLO_COMMAND } from '../../../src/constants'; import * as mock from '../../tools/mongodb-mock/index'; import { setupDatabase } from '../shared'; @@ -31,7 +31,7 @@ function triggerResumableError(changeStream: ChangeStream, delay: number, onClos function triggerResumableError( changeStream: ChangeStream, delay: number | (() => void), - onClose?: () => void + onClose?: (err?: Error) => void ) { if (typeof delay === 'function') { onClose = delay; @@ -53,7 +53,7 @@ function triggerResumableError( } const nextStub = sinon.stub(changeStream.cursor, 'next').callsFake(async function () { - callback(new MongoNetworkError('error triggered from test')); + onClose(new MongoNetworkError('error triggered from test')); nextStub.restore(); }); diff --git a/test/integration/node-specific/examples/transactions.test.js b/test/integration/node-specific/examples/transactions.test.js index 4b72a88801c..c2e66a0d39a 100644 --- a/test/integration/node-specific/examples/transactions.test.js +++ b/test/integration/node-specific/examples/transactions.test.js @@ -1,6 +1,6 @@ 'use strict'; -const { MongoClient } = require('../../../mongodb'); +const { MongoClient } = 
require('mongodb'); // Yes, we are shadowing a global here but we are not actually ever printing anything in this file // This just so the examples can use console.log to make for nice copy pasting diff --git a/test/integration/node-specific/examples/versioned_api.js b/test/integration/node-specific/examples/versioned_api.js index e8e131c1309..b812964aac3 100644 --- a/test/integration/node-specific/examples/versioned_api.js +++ b/test/integration/node-specific/examples/versioned_api.js @@ -1,5 +1,5 @@ 'use strict'; -const { MongoClient } = require('../../../mongodb'); +const { MongoClient } = require('mongodb'); describe('examples.versionedApi:', function () { let uri; From 55b6a5c4013f8244515d838b203a8c8595c05d29 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Fri, 17 Oct 2025 11:31:07 +0200 Subject: [PATCH 34/41] test(NODE-7179): import client from src --- test/integration/node-specific/examples/transactions.test.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/node-specific/examples/transactions.test.js b/test/integration/node-specific/examples/transactions.test.js index c2e66a0d39a..af774ddeabd 100644 --- a/test/integration/node-specific/examples/transactions.test.js +++ b/test/integration/node-specific/examples/transactions.test.js @@ -1,6 +1,6 @@ 'use strict'; -const { MongoClient } = require('mongodb'); +const { MongoClient } = require('../../../../src'); // Yes, we are shadowing a global here but we are not actually ever printing anything in this file // This just so the examples can use console.log to make for nice copy pasting From 07b494c44e53c15ae64aa45a0e9836348b4ef1ae Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Fri, 17 Oct 2025 11:39:41 +0200 Subject: [PATCH 35/41] test(NODE-7179): revert changes in examples/ folder --- test/integration/node-specific/examples/transactions.test.js | 2 +- test/integration/node-specific/examples/versioned_api.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff 
--git a/test/integration/node-specific/examples/transactions.test.js b/test/integration/node-specific/examples/transactions.test.js index af774ddeabd..4b72a88801c 100644 --- a/test/integration/node-specific/examples/transactions.test.js +++ b/test/integration/node-specific/examples/transactions.test.js @@ -1,6 +1,6 @@ 'use strict'; -const { MongoClient } = require('../../../../src'); +const { MongoClient } = require('../../../mongodb'); // Yes, we are shadowing a global here but we are not actually ever printing anything in this file // This just so the examples can use console.log to make for nice copy pasting diff --git a/test/integration/node-specific/examples/versioned_api.js b/test/integration/node-specific/examples/versioned_api.js index b812964aac3..e8e131c1309 100644 --- a/test/integration/node-specific/examples/versioned_api.js +++ b/test/integration/node-specific/examples/versioned_api.js @@ -1,5 +1,5 @@ 'use strict'; -const { MongoClient } = require('mongodb'); +const { MongoClient } = require('../../../mongodb'); describe('examples.versionedApi:', function () { let uri; From 0fd357e23b4d8878709d53045dcf357ce51c3fcf Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Fri, 17 Oct 2025 13:33:04 +0200 Subject: [PATCH 36/41] test(NODE-7179): temporarily skip work-in-progress tests --- test/integration/crud/insert.test.js | 0 test/integration/crud/misc_cursors.test.js | 4104 +++++++++++++++++ .../bson-options/ignore_undefined.test.js | 0 .../bson-options/promote_buffers.test.js | 0 .../bson-options/promote_values.test.js | 0 .../node-specific/cursor_stream.test.js | 354 ++ 6 files changed, 4458 insertions(+) create mode 100644 test/integration/crud/insert.test.js create mode 100644 test/integration/crud/misc_cursors.test.js create mode 100644 test/integration/node-specific/bson-options/ignore_undefined.test.js create mode 100644 test/integration/node-specific/bson-options/promote_buffers.test.js create mode 100644 test/integration/node-specific/bson-options/promote_values.test.js create
mode 100644 test/integration/node-specific/cursor_stream.test.js diff --git a/test/integration/crud/insert.test.js b/test/integration/crud/insert.test.js new file mode 100644 index 00000000000..e69de29bb2d diff --git a/test/integration/crud/misc_cursors.test.js b/test/integration/crud/misc_cursors.test.js new file mode 100644 index 00000000000..d6492854ce3 --- /dev/null +++ b/test/integration/crud/misc_cursors.test.js @@ -0,0 +1,4104 @@ +'use strict'; +const { assert: test, filterForCommands, setupDatabase } = require('../shared'); +const { runLater, sleep } = require('../../tools/utils'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const { expect } = require('chai'); +const BSON = require('bson'); +const sinon = require('sinon'); +const { Writable } = require('stream'); +const { once, on } = require('events'); +const { setTimeout } = require('timers'); +const { ReadPreference } = require('../../mongodb'); +const { ServerType, MongoClientClosedError } = require('../../mongodb'); +const { formatSort } = require('../../mongodb'); + +describe.skip('Cursor', function () { + before(function () { + return setupDatabase(this.configuration, [ + 'cursorkilltest1', + 'cursor_session_tests', + 'cursor_session_tests2' + ]); + }); + + let client; + + beforeEach(async function () { + client = this.configuration.newClient({ maxPoolSize: 1, monitorCommands: true }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('should not throw an error when toArray and forEach are called after cursor is closed', async function () { + const db = client.db(); + + const collection = await db.collection('test_to_a'); + await collection.insertMany([{ a: 1 }]); + const cursor = collection.find({}); + + const firstToArray = await cursor.toArray().catch(error => error); + expect(firstToArray).to.be.an('array'); + + expect(cursor.closed).to.be.true; + + const secondToArray = await cursor.toArray().catch(error => error); + 
expect(secondToArray).to.be.an('array'); + expect(secondToArray).to.have.lengthOf(0); + + const forEachResult = await cursor + .forEach(() => { + expect.fail('should not run forEach on an empty/closed cursor'); + }) + .catch(error => error); + expect(forEachResult).to.be.undefined; + }); + + it('cursor should close after first next operation', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('close_on_next', (err, collection) => { + expect(err).to.not.exist; + + collection.insert( + [{ a: 1 }, { a: 1 }, { a: 1 }], + configuration.writeConcernMax(), + err => { + expect(err).to.not.exist; + + var cursor = collection.find({}); + this.defer(() => cursor.close()); + + cursor.batchSize(2); + cursor.next(err => { + expect(err).to.not.exist; + done(); + }); + } + ); + }); + }); + } + }); + + it('cursor should trigger getMore', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('trigger_get_more', (err, collection) => { + expect(err).to.not.exist; + + collection.insert( + [{ a: 1 }, { a: 1 }, { a: 1 }], + configuration.writeConcernMax(), + err => { + expect(err).to.not.exist; + const cursor = collection.find({}).batchSize(2); + this.defer(() => cursor.close()); + cursor.toArray(err => { + 
expect(err).to.not.exist; + done(); + }); + } + ); + }); + }); + } + }); + + it('shouldCorrectlyExecuteCursorExplain', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_explain', (err, collection) => { + expect(err).to.not.exist; + + collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + collection.find({ a: 1 }).explain((err, explanation) => { + expect(err).to.not.exist; + expect(explanation).to.exist; + done(); + }); + }); + }); + }); + } + }); + + it('shouldCorrectlyExecuteCursorCount', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_count', (err, collection) => { + expect(err).to.not.exist; + + collection.find().count(err => { + expect(err).to.not.exist; + + function insert(callback) { + var total = 10; + + for (var i = 0; i < 10; i++) { + collection.insert({ x: i }, configuration.writeConcernMax(), e => { + expect(e).to.not.exist; + total = total - 1; + if (total === 0) callback(); + }); + } + } + + function finished() { + collection.find().count((err, count) => { + expect(err).to.not.exist; + test.equal(10, count); + test.ok(count.constructor === Number); + + collection.find({}, { limit: 5 }).count((err, count) => { + 
expect(err).to.not.exist; + test.equal(5, count); + + collection.find({}, { skip: 5 }).count((err, count) => { + expect(err).to.not.exist; + test.equal(5, count); + + db.collection('acollectionthatdoesn').count((err, count) => { + expect(err).to.not.exist; + test.equal(0, count); + + var cursor = collection.find(); + cursor.count((err, count) => { + expect(err).to.not.exist; + test.equal(10, count); + + cursor.forEach( + () => {}, + err => { + expect(err).to.not.exist; + cursor.count((err, count2) => { + expect(err).to.not.exist; + expect(count2).to.equal(10); + expect(count2).to.equal(count); + done(); + }); + } + ); + }); + }); + }); + }); + }); + } + + insert(function () { + finished(); + }); + }); + }); + }); + } + }); + + it('should correctly execute cursor count with secondary readPreference', { + metadata: { requires: { topology: 'replicaset' } }, + async test() { + const bag = []; + client.on('commandStarted', filterForCommands(['count'], bag)); + + const cursor = client + .db() + .collection('countTEST') + .find({ qty: { $gt: 4 } }); + await cursor.count({ readPreference: ReadPreference.SECONDARY }); + + const selectedServerAddress = bag[0].address + .replace('127.0.0.1', 'localhost') + .replace('[::1]', 'localhost'); + const selectedServer = client.topology.description.servers.get(selectedServerAddress); + expect(selectedServer).property('type').to.equal(ServerType.RSSecondary); + } + }); + + it('shouldCorrectlyExecuteCursorCountWithDottedCollectionName', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_count.ext', (err, collection) => { + 
expect(err).to.not.exist; + + collection.find().count(err => { + expect(err).to.not.exist; + + function insert(callback) { + var total = 10; + + for (var i = 0; i < 10; i++) { + collection.insert({ x: i }, configuration.writeConcernMax(), e => { + expect(e).to.not.exist; + total = total - 1; + if (total === 0) callback(); + }); + } + } + + function finished() { + collection.find().count((err, count) => { + expect(err).to.not.exist; + test.equal(10, count); + test.ok(count.constructor === Number); + + collection.find({}, { limit: 5 }).count((err, count) => { + expect(err).to.not.exist; + test.equal(5, count); + + collection.find({}, { skip: 5 }).count((err, count) => { + expect(err).to.not.exist; + test.equal(5, count); + + db.collection('acollectionthatdoesn').count((err, count) => { + expect(err).to.not.exist; + test.equal(0, count); + + var cursor = collection.find(); + cursor.count((err, count) => { + expect(err).to.not.exist; + test.equal(10, count); + + cursor.forEach( + () => {}, + err => { + expect(err).to.not.exist; + cursor.count((err, count2) => { + expect(err).to.not.exist; + expect(count2).to.equal(10); + expect(count2).to.equal(count); + done(); + }); + } + ); + }); + }); + }); + }); + }); + } + + insert(function () { + finished(); + }); + }); + }); + }); + } + }); + + it('shouldThrowErrorOnEachWhenMissingCallback', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_each', (err, collection) => { + expect(err).to.not.exist; + function insert(callback) { + var total = 10; + + for (var i = 0; i < 10; i++) { + collection.insert({ x: i }, 
configuration.writeConcernMax(), e => { + expect(e).to.not.exist; + total = total - 1; + if (total === 0) callback(); + }); + } + } + + function finished() { + const cursor = collection.find(); + + test.throws(function () { + cursor.forEach(); + }); + + done(); + } + + insert(function () { + finished(); + }); + }); + }); + } + }); + + it('shouldCorrectlyHandleLimitOnCursor', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_cursor_limit', (err, collection) => { + function insert(callback) { + var total = 10; + + for (var i = 0; i < 10; i++) { + collection.insert({ x: i }, configuration.writeConcernMax(), e => { + expect(e).to.not.exist; + total = total - 1; + if (total === 0) callback(); + }); + } + } + + function finished() { + collection + .find() + .limit(5) + .toArray((err, items) => { + test.equal(5, items.length); + + // Let's close the db + expect(err).to.not.exist; + done(); + }); + } + + insert(function () { + finished(); + }); + }); + }); + } + }); + + it('shouldCorrectlyHandleNegativeOneLimitOnCursor', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_cursor_negative_one_limit', (err, collection) => { + expect(err).to.not.exist; + function insert(callback) { + 
var total = 10; + + for (var i = 0; i < 10; i++) { + collection.insert({ x: i }, configuration.writeConcernMax(), e => { + expect(e).to.not.exist; + total = total - 1; + if (total === 0) callback(); + }); + } + } + + function finished() { + collection + .find() + .limit(-1) + .toArray((err, items) => { + expect(err).to.not.exist; + test.equal(1, items.length); + + // Let's close the db + done(); + }); + } + + insert(function () { + finished(); + }); + }); + }); + } + }); + + it('shouldCorrectlyHandleAnyNegativeLimitOnCursor', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_cursor_any_negative_limit', (err, collection) => { + expect(err).to.not.exist; + function insert(callback) { + var total = 10; + + for (var i = 0; i < 10; i++) { + collection.insert({ x: i }, configuration.writeConcernMax(), e => { + expect(e).to.not.exist; + total = total - 1; + if (total === 0) callback(); + }); + } + } + + function finished() { + collection + .find() + .limit(-5) + .toArray((err, items) => { + expect(err).to.not.exist; + test.equal(5, items.length); + + // Let's close the db + done(); + }); + } + + insert(function () { + finished(); + }); + }); + }); + } + }); + + it('shouldCorrectlyReturnErrorsOnIllegalLimitValuesNotAnInt', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + 
this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_limit_exceptions_2', (err, collection) => { + expect(err).to.not.exist; + + collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + const cursor = collection.find(); + this.defer(() => cursor.close()); + + try { + cursor.limit('not-an-integer'); + } catch (err) { + test.equal('Operation "limit" requires an integer', err.message); + } + + done(); + }); + }); + }); + } + }); + + it('shouldCorrectlyReturnErrorsOnIllegalLimitValuesIsClosedWithinNext', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_limit_exceptions', (err, collection) => { + expect(err).to.not.exist; + + collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + const cursor = collection.find(); + this.defer(() => cursor.close()); + + cursor.next(err => { + expect(err).to.not.exist; + expect(() => { + cursor.limit(1); + }).to.throw(/Cursor is already initialized/); + + done(); + }); + }); + }); + }); + } + }); + + // NOTE: who cares what you set when the cursor is closed? 
+ it.skip('shouldCorrectlyReturnErrorsOnIllegalLimitValuesIsClosedWithinClose', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_limit_exceptions_1', (err, collection) => { + expect(err).to.not.exist; + + collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + const cursor = collection.find(); + cursor.close(err => { + expect(err).to.not.exist; + expect(() => { + cursor.limit(1); + }).to.throw(/not extensible/); + + done(); + }); + }); + }); + }); + } + }); + + it('shouldCorrectlySkipRecordsOnCursor', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_skip', (err, collection) => { + expect(err).to.not.exist; + + const insert = callback => { + var total = 10; + for (var i = 0; i < 10; i++) { + collection.insert({ x: i }, configuration.writeConcernMax(), e => { + expect(e).to.not.exist; + + total = total - 1; + if (total === 0) callback(); + }); + } + }; + + insert(() => { + const cursor = collection.find(); + this.defer(() => cursor.close()); + + cursor.count((err, count) => { + expect(err).to.not.exist; + test.equal(10, count); + }); + + const cursor2 = collection.find(); + this.defer(() => cursor2.close()); + + 
cursor2.toArray((err, items) => { + expect(err).to.not.exist; + test.equal(10, items.length); + + collection + .find() + .skip(2) + .toArray((err, items2) => { + expect(err).to.not.exist; + test.equal(8, items2.length); + + // Check that we have the same elements + var numberEqual = 0; + var sliced = items.slice(2, 10); + + for (var i = 0; i < sliced.length; i++) { + if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; + } + + test.equal(8, numberEqual); + done(); + }); + }); + }); + }); + }); + } + }); + + it('shouldCorrectlyReturnErrorsOnIllegalSkipValues', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_skip_exceptions', (err, collection) => { + expect(err).to.not.exist; + collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + try { + collection.find().skip('not-an-integer'); + } catch (err) { + test.equal('Operation "skip" requires an integer', err.message); + } + + const cursor = collection.find(); + cursor.next(err => { + expect(err).to.not.exist; + + // NOTE: who cares what you set when closed, if not initialized + // expect(() => { + // cursor.skip(1); + // }).to.throw(/not extensible/); + + const cursor2 = collection.find(); + cursor2.close(err => { + expect(err).to.not.exist; + + // NOTE: who cares what you set when closed, if not initialized + // expect(() => { + // cursor2.skip(1); + // }).to.throw(/not extensible/); + + done(); + }); + }); + }); + }); + }); + } + }); + + it('shouldReturnErrorsOnIllegalBatchSizes', { + // Add a tag that our runner can trigger on + // in this case we are setting 
that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_batchSize_exceptions', (err, collection) => { + expect(err).to.not.exist; + collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + let cursor = collection.find(); + try { + cursor.batchSize('not-an-integer'); + test.ok(false); + } catch (err) { + test.equal('Operation "batchSize" requires an integer', err.message); + } + + cursor = collection.find(); + cursor.next(err => { + expect(err).to.not.exist; + + cursor.next(err => { + expect(err).to.not.exist; + + // NOTE: who cares what you set when closed, if not initialized + // expect(() => { + // cursor.batchSize(1); + // }).to.throw(/not extensible/); + + const cursor2 = collection.find(); + cursor2.close(err => { + expect(err).to.not.exist; + + // NOTE: who cares what you set when closed, if not initialized + // expect(() => { + // cursor2.batchSize(1); + // }).to.throw(/not extensible/); + + done(); + }); + }); + }); + }); + }); + }); + } + }); + + it('shouldCorrectlyHandleBatchSize', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_multiple_batch_size', (err, collection) => { + expect(err).to.not.exist; + + //test with the last batch that is a multiple of batchSize + var records = 4; + var batchSize = 2; 
+ var docs = []; + for (var i = 0; i < records; i++) { + docs.push({ a: i }); + } + + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + const cursor = collection.find({}, { batchSize: batchSize }); + + //1st + cursor.next((err, items) => { + expect(err).to.not.exist; + test.equal(1, cursor.bufferedCount()); + test.ok(items != null); + + //2nd + cursor.next((err, items) => { + expect(err).to.not.exist; + test.equal(0, cursor.bufferedCount()); + test.ok(items != null); + + //3rd + cursor.next((err, items) => { + expect(err).to.not.exist; + test.equal(1, cursor.bufferedCount()); + test.ok(items != null); + + //4th + cursor.next((err, items) => { + expect(err).to.not.exist; + test.equal(0, cursor.bufferedCount()); + test.ok(items != null); + + //No more + cursor.next((err, items) => { + expect(err).to.not.exist; + test.ok(items == null); + test.ok(cursor.closed); + done(); + }); + }); + }); + }); + }); + }); + }); + }); + } + }); + + it('shouldHandleWhenLimitBiggerThanBatchSize', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_limit_greater_than_batch_size', (err, collection) => { + expect(err).to.not.exist; + + var limit = 4; + var records = 10; + var batchSize = 3; + var docs = []; + for (var i = 0; i < records; i++) { + docs.push({ a: i }); + } + + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + var cursor = collection.find({}, { batchSize: batchSize, limit: limit }); + //1st + cursor.next(err => { + expect(err).to.not.exist; + test.equal(2, cursor.bufferedCount()); + + 
//2nd + cursor.next(err => { + expect(err).to.not.exist; + test.equal(1, cursor.bufferedCount()); + + //3rd + cursor.next(err => { + expect(err).to.not.exist; + test.equal(0, cursor.bufferedCount()); + + //4th + cursor.next(err => { + expect(err).to.not.exist; + + //No more + cursor.next((err, items) => { + expect(err).to.not.exist; + test.ok(items == null); + test.ok(cursor.closed); + done(); + }); + }); + }); + }); + }); + }); + }); + }); + } + }); + + it('shouldHandleLimitLessThanBatchSize', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_limit_less_than_batch_size', (err, collection) => { + expect(err).to.not.exist; + + var limit = 2; + var records = 10; + var batchSize = 4; + var docs = []; + for (var i = 0; i < records; i++) { + docs.push({ a: i }); + } + + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + var cursor = collection.find({}, { batchSize: batchSize, limit: limit }); + //1st + cursor.next(err => { + expect(err).to.not.exist; + test.equal(1, cursor.bufferedCount()); + + //2nd + cursor.next(err => { + expect(err).to.not.exist; + test.equal(0, cursor.bufferedCount()); + + //No more + cursor.next((err, items) => { + expect(err).to.not.exist; + test.ok(items == null); + test.ok(cursor.closed); + done(); + }); + }); + }); + }); + }); + }); + } + }); + + it('shouldHandleSkipLimitChaining', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: 
function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + var collection = db.collection('shouldHandleSkipLimitChaining'); + + function insert(callback) { + var total = 10; + + for (var i = 0; i < 10; i++) { + collection.insert({ x: i }, configuration.writeConcernMax(), e => { + expect(e).to.not.exist; + total = total - 1; + if (total === 0) callback(); + }); + } + } + + function finished() { + collection.find().toArray((err, items) => { + expect(err).to.not.exist; + test.equal(10, items.length); + + collection + .find() + .limit(5) + .skip(3) + .toArray(function (err, items2) { + expect(err).to.not.exist; + test.equal(5, items2.length); + + // Check that we have the same elements + var numberEqual = 0; + var sliced = items.slice(3, 8); + + for (var i = 0; i < sliced.length; i++) { + if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; + } + test.equal(5, numberEqual); + done(); + }); + }); + } + + insert(function () { + finished(); + }); + }); + } + }); + + it('shouldCorrectlyHandleLimitSkipChainingInline', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_limit_skip_chaining_inline', (err, collection) => { + expect(err).to.not.exist; + + function insert(callback) { + var total = 10; + + for (var i = 0; i < 10; i++) { + collection.insert({ x: i }, configuration.writeConcernMax(), e => { + expect(e).to.not.exist; + total = total - 1; + if (total === 0) callback(); + }); + } + } + + function finished() { + 
collection.find().toArray((err, items) => { + expect(err).to.not.exist; + test.equal(10, items.length); + + collection + .find() + .limit(5) + .skip(3) + .toArray(function (err, items2) { + expect(err).to.not.exist; + test.equal(5, items2.length); + + // Check that we have the same elements + var numberEqual = 0; + var sliced = items.slice(3, 8); + + for (var i = 0; i < sliced.length; i++) { + if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; + } + test.equal(5, numberEqual); + done(); + }); + }); + } + + insert(function () { + finished(); + }); + }); + }); + } + }); + + it('shouldCloseCursorNoQuerySent', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_close_no_query_sent', (err, collection) => { + expect(err).to.not.exist; + + const cursor = collection.find(); + cursor.close(err => { + expect(err).to.not.exist; + test.equal(true, cursor.closed); + done(); + }); + }); + }); + } + }); + + it('shouldCorrectlyRefillViaGetMoreCommand', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var COUNT = 1000; + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_refill_via_get_more', (err, collection) => { + expect(err).to.not.exist; + + function insert(callback) { + var docs = []; + + for (var i = 0; i < COUNT; i++) 
{ + docs.push({ a: i }); + } + + collection.insertMany(docs, configuration.writeConcernMax(), callback); + } + + function finished() { + collection.count((err, count) => { + expect(err).to.not.exist; + test.equal(COUNT, count); + }); + + var total = 0; + collection.find({}, {}).forEach( + item => { + total = total + item.a; + }, + err => { + expect(err).to.not.exist; + test.equal(499500, total); + + collection.count((err, count) => { + expect(err).to.not.exist; + test.equal(COUNT, count); + }); + + collection.count((err, count) => { + expect(err).to.not.exist; + test.equal(COUNT, count); + + var total2 = 0; + collection.find().forEach( + item => { + total2 = total2 + item.a; + }, + err => { + expect(err).to.not.exist; + test.equal(499500, total2); + collection.count((err, count) => { + expect(err).to.not.exist; + test.equal(COUNT, count); + test.equal(total, total2); + done(); + }); + } + ); + }); + } + ); + } + + insert(function () { + finished(); + }); + }); + }); + } + }); + + it('shouldCorrectlyRefillViaGetMoreAlternativeCollection', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_refill_via_get_more_alt_coll', (err, collection) => { + expect(err).to.not.exist; + var COUNT = 1000; + + function insert(callback) { + var docs = []; + + for (var i = 0; i < COUNT; i++) { + docs.push({ a: i }); + } + + collection.insertMany(docs, configuration.writeConcernMax(), callback); + } + + function finished() { + collection.count((err, count) => { + expect(err).to.not.exist; + test.equal(1000, count); + }); + + var total = 0; + collection.find().forEach( + doc => { + total = 
total + doc.a; + }, + err => { + expect(err).to.not.exist; + test.equal(499500, total); + + collection.count((err, count) => { + expect(err).to.not.exist; + test.equal(1000, count); + }); + + collection.count((err, count) => { + expect(err).to.not.exist; + test.equal(1000, count); + + var total2 = 0; + collection.find().forEach( + doc => { + total2 = total2 + doc.a; + }, + err => { + expect(err).to.not.exist; + expect(total2).to.equal(499500); + + collection.count((err, count) => { + expect(err).to.not.exist; + expect(count).to.equal(1000); + expect(total2).to.equal(total); + done(); + }); + } + ); + }); + } + ); + } + + insert(function () { + finished(); + }); + }); + }); + } + }); + + it('shouldCloseCursorAfterQueryHasBeenSent', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_close_after_query_sent', (err, collection) => { + expect(err).to.not.exist; + + collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + const cursor = collection.find({ a: 1 }); + cursor.next(err => { + expect(err).to.not.exist; + + cursor.close(err => { + expect(err).to.not.exist; + test.equal(true, cursor.closed); + done(); + }); + }); + }); + }); + }); + } + }); + + it('shouldCorrectlyExecuteCursorCountWithFields', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + 
expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_count_with_fields', (err, collection) => { + expect(err).to.not.exist; + + collection.insertOne({ x: 1, a: 2 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + collection + .find({}) + .project({ a: 1 }) + .toArray((err, items) => { + expect(err).to.not.exist; + test.equal(1, items.length); + test.equal(2, items[0].a); + expect(items[0].x).to.not.exist; + done(); + }); + }); + }); + }); + } + }); + + it('shouldCorrectlyCountWithFieldsUsingExclude', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('test_count_with_fields_using_exclude', (err, collection) => { + expect(err).to.not.exist; + + collection.insertOne({ x: 1, a: 2 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + collection.find({}, { projection: { x: 0 } }).toArray((err, items) => { + expect(err).to.not.exist; + test.equal(1, items.length); + test.equal(2, items[0].a); + expect(items[0].x).to.not.exist; + done(); + }); + }); + }); + }); + } + }); + + it('Should correctly execute count on cursor', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + + for (var i = 0; i < 1000; i++) { + var d = new Date().getTime() + i * 1000; + docs[i] = { a: i, createdAt: new Date(d) }; + } + + const configuration = this.configuration; + 
client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('Should_correctly_execute_count_on_cursor_1', (err, collection) => { + expect(err).to.not.exist; + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + let total = 0; + // Create a cursor for the content + const cursor = collection.find({}); + this.defer(() => cursor.close()); + + cursor.count(err => { + expect(err).to.not.exist; + // Ensure each returns all documents + cursor.forEach( + () => { + total++; + }, + err => { + expect(err).to.not.exist; + cursor.count((err, c) => { + expect(err).to.not.exist; + expect(c).to.equal(1000); + expect(total).to.equal(1000); + done(); + }); + } + ); + }); + }); + }); + }); + } + }); + + it('does not auto destroy streams', function (done) { + const docs = []; + + for (var i = 0; i < 10; i++) { + docs.push({ a: i + 1 }); + } + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + + const db = client.db(configuration.db); + db.createCollection('does_not_autodestroy_streams', (err, collection) => { + expect(err).to.not.exist; + + collection.insertMany(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + const cursor = collection.find(); + const stream = cursor.stream(); + stream.on('close', () => { + expect.fail('extra close event must not be called'); + }); + stream.on('end', () => { + client.close(); + done(); + }); + stream.on('data', doc => { + expect(doc).to.exist; + }); + stream.resume(); + }); + }); + }); + }); + + it('should be able to stream documents', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + + for 
(var i = 0; i < 1000; i++) { + docs[i] = { a: i + 1 }; + } + + var count = 0; + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('Should_be_able_to_stream_documents', (err, collection) => { + expect(err).to.not.exist; + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + var paused = 0, + closed = 0, + resumed = 0, + i = 0; + + const cursor = collection.find(); + const stream = cursor.stream(); + + stream.on('data', function (doc) { + test.equal(true, !!doc); + test.equal(true, !!doc.a); + count = count + 1; + + if (paused > 0 && 0 === resumed) { + err = new Error('data emitted during pause'); + return testDone(); + } + + if (++i === 3) { + stream.pause(); + paused++; + + setTimeout(function () { + stream.resume(); + resumed++; + }, 20); + } + }); + + stream.once('error', function (er) { + err = er; + testDone(); + }); + + stream.once('end', function () { + closed++; + testDone(); + }); + + function testDone() { + expect(err).to.not.exist; + test.equal(i, docs.length); + test.equal(1, closed); + test.equal(1, paused); + test.equal(1, resumed); + test.strictEqual(cursor.closed, true); + done(); + } + }); + }); + }); + } + }); + + it('immediately destroying a stream prevents the query from executing', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var i = 0, + docs = [{ b: 2 }, { b: 3 }], + doneCalled = 0; + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection( + 
'immediately_destroying_a_stream_prevents_the_query_from_executing', + (err, collection) => { + expect(err).to.not.exist; + + // insert all docs + collection.insertMany(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + const cursor = collection.find(); + const stream = cursor.stream(); + + stream.on('data', function () { + i++; + }); + + cursor.once('close', testDone('close')); + stream.once('error', testDone('error')); + + stream.destroy(); + + function testDone() { + return err => { + ++doneCalled; + + if (doneCalled === 1) { + expect(err).to.not.exist; + test.strictEqual(0, i); + test.strictEqual(true, cursor.closed); + done(); + } + }; + } + }); + } + ); + }); + } + }); + + it('removes session when cloning an find cursor', async function () { + const collection = await client.db().collection('test'); + + const cursor = collection.find({}); + await cursor.next(); + + const clonedCursor = cursor.clone(); + + expect(cursor).to.have.property('session').not.to.be.null; + expect(clonedCursor).to.have.property('session').to.be.null; + }); + + it('removes session when cloning an aggregation cursor', async function () { + const collection = await client.db().collection('test'); + + const cursor = collection.aggregate([{ $match: {} }]); + await cursor.next(); + + const clonedCursor = cursor.clone(); + + expect(cursor).to.have.property('session').not.to.be.null; + expect(clonedCursor).to.have.property('session').to.be.null; + }); + + it('destroying a stream stops it', async function () { + const db = client.db(); + await db.dropCollection('destroying_a_stream_stops_it').catch(() => null); + const collection = await db.createCollection('destroying_a_stream_stops_it'); + + const docs = Array.from({ length: 10 }, (_, i) => ({ b: i + 1 })); + + await collection.insertMany(docs); + + const cursor = collection.find(); + const stream = cursor.stream(); + + expect(cursor).property('closed', false); + + const willClose = once(cursor, 'close'); + + 
const dataEvents = on(stream, 'data'); + + for (let i = 0; i < 5; i++) { + let { + value: [doc] + } = await dataEvents.next(); + expect(doc).property('b', i + 1); + } + + // After 5 successful data events, destroy stream + stream.destroy(); + + // We should get a close event on the stream and a close event on the cursor + // We should **not** get an 'error' or an 'end' event, + // the following will throw if either stream or cursor emitted an 'error' event + await Promise.race([ + willClose, + sleep(100).then(() => Promise.reject(new Error('close event never emitted'))) + ]); + }); + + // NOTE: skipped for use of topology manager + it.skip('cursor stream errors', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { requires: { topology: ['single'] } }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('cursor_stream_errors', (err, collection) => { + expect(err).to.not.exist; + + var docs = []; + for (var ii = 0; ii < 10; ++ii) docs.push({ b: ii + 1 }); + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + var finished = 0, + i = 0; + + const cursor = collection.find({}, { batchSize: 5 }); + const stream = cursor.stream(); + + stream.on('data', function () { + if (++i === 4) { + // Force restart + configuration.manager.stop(9); + } + }); + + stream.once('close', testDone('close')); + stream.once('error', testDone('error')); + + function testDone() { + return function () { + ++finished; + + if (finished === 2) { + setTimeout(function () { + test.equal(5, i); + test.equal(true, cursor.closed); + client.close(); + + configuration.manager.start().then(function () { + done(); + }); + }, 150); + } + }; + } + }); + }); + }); + } + 
}); + + it('cursor stream pipe', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('cursor_stream_pipe', (err, collection) => { + expect(err).to.not.exist; + + var docs = []; + 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').forEach(function (name) { + docs.push({ name: name }); + }); + + // insert all docs + collection.insertMany(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + const filename = path.join(os.tmpdir(), '_nodemongodbnative_stream_out.txt'); + const out = fs.createWriteStream(filename); + const stream = collection.find().stream().map(JSON.stringify); + + stream.pipe(out); + // Wait for output stream to close + out.on('close', testDone); + + function testDone(err) { + // Object.prototype.toString = toString; + test.strictEqual(undefined, err); + var contents = fs.readFileSync(filename, 'utf8'); + test.ok(/Aaden/.test(contents)); + test.ok(/Aaron/.test(contents)); + test.ok(/Adrian/.test(contents)); + test.ok(/Aditya/.test(contents)); + test.ok(/Bob/.test(contents)); + test.ok(/Joe/.test(contents)); + fs.unlinkSync(filename); + done(); + } + }); + }); + }); + } + }); + + it( + 'closes cursors when client is closed even if it has not been exhausted', + { requires: { topology: '!replicaset' } }, + async function () { + await client + .db() + .dropCollection('test_cleanup_tailable') + .catch(() => null); + + const collection = await client + .db() + .createCollection('test_cleanup_tailable', { capped: true, size: 1000, max: 3 }); + + // insert only 2 docs in capped coll of 3 + await collection.insertMany([{ a: 1 }, { a: 1 }]); + + const 
cursor = collection.find({}, { tailable: true, awaitData: true, maxAwaitTimeMS: 2000 }); + + await cursor.next(); + await cursor.next(); + + const nextCommand = once(client, 'commandStarted'); + // will block for maxAwaitTimeMS (except we are closing the client) + const rejectedEarlyBecauseClientClosed = cursor.next().catch(error => error); + + for ( + let [{ commandName }] = await nextCommand; + commandName !== 'getMore'; + [{ commandName }] = await once(client, 'commandStarted') + ); + + await client.close(); + expect(cursor).to.have.property('closed', true); + + const error = await rejectedEarlyBecauseClientClosed; + expect(error).to.be.instanceOf(MongoClientClosedError); + } + ); + + it('shouldAwaitDataWithDocumentsAvailable', function (done) { + // www.mongodb.com/docs/display/DOCS/Tailable+Cursors + + const configuration = this.configuration; + const client = configuration.newClient({ maxPoolSize: 1 }); + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + const options = { capped: true, size: 8 }; + db.createCollection('should_await_data_no_docs', options, (err, collection) => { + expect(err).to.not.exist; + + // Create cursor with awaitData, and timeout after the period specified + const cursor = collection.find({}, { tailable: true, awaitData: true }); + this.defer(() => cursor.close()); + + cursor.forEach( + () => {}, + err => { + expect(err).to.not.exist; + done(); + } + ); + }); + }); + }); + + context('awaiting data core tailable cursor test', () => { + let client; + let cursor; + + beforeEach(async function () { + client = await this.configuration.newClient().connect(); + }); + + afterEach(async () => { + if (cursor) await cursor.close(); + await client.close(); + }); + + it( + 'should block waiting for new data to arrive when the cursor reaches the end of the capped collection', + { + metadata: { requires: { mongodb: '>=3.2' } }, + async test() { + const db = 
client.db('cursor_tailable'); + + try { + await db.collection('cursor_tailable').drop(); + // eslint-disable-next-line no-empty + } catch {} + + const collection = await db.createCollection('cursor_tailable', { + capped: true, + size: 10000 + }); + + const res = await collection.insertOne({ a: 1 }); + expect(res).property('insertedId').to.exist; + + cursor = collection.find({}, { batchSize: 2, tailable: true, awaitData: true }); + const doc0 = await cursor.next(); + expect(doc0).to.have.property('a', 1); + + // After 300ms make an insert + const later = runLater(async () => { + const res = await collection.insertOne({ b: 2 }); + expect(res).property('insertedId').to.exist; + }, 300); + + const start = performance.now(); + const doc1 = await cursor.next(); + expect(doc1).to.have.property('b', 2); + const end = performance.now(); + + await later; // make sure this finished, without a failure + + // We should see here that cursor.next blocked for at least 300ms + expect(end - start).to.be.at.least(290); + } + } + ); + }); + + // NOTE: should we continue to let users explicitly `kill` a cursor? 
+ it.skip('Should correctly retry tailable cursor connection', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + // www.mongodb.com/docs/display/DOCS/Tailable+Cursors + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + const options = { capped: true, size: 8 }; + db.createCollection('should_await_data', options, (err, collection) => { + expect(err).to.not.exist; + + collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Create cursor with awaitData, and timeout after the period specified + var cursor = collection.find({}, { tailable: true, awaitData: true }); + cursor.forEach( + () => cursor.kill(), + () => { + // kill cursor b/c cursor is tailable / awaitable + cursor.close(done); + } + ); + }); + }); + }); + } + }); + + it('shouldCorrectExecuteExplainHonoringLimit', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + docs[0] = { + _keywords: [ + 'compact', + 'ii2gd', + 'led', + '24-48v', + 'presse-etoupe', + 'bexbgl1d24483', + 'flash', + '48v', + 'eexd', + 'feu', + 'presse', + 'compris', + 'rouge', + 'etoupe', + 'iic', + 'ii2gdeexdiict5', + 'red', + 'aet' + ] + }; + docs[1] = { + _keywords: [ + 'reducteur', + '06212', + 'd20/16', + 'manch', + 'd20', + 'manchon', + 'ard', + 'sable', + 'irl', + 'red' + ] + }; + docs[2] = { + _keywords: [ + 'reducteur', + '06214', + 'manch', + 'd25/20', + 'd25', + 'manchon', + 'ard', + 'sable', + 'irl', + 'red' + ] + }; + docs[3] = { + _keywords: [ + 'bar', + 
'rac', + 'boite', + '6790178', + '50-240/4-35', + '240', + 'branch', + 'coulee', + 'ddc', + 'red', + 'ip2x' + ] + }; + docs[4] = { + _keywords: [ + 'bar', + 'ip2x', + 'boite', + '6790158', + 'ddi', + '240', + 'branch', + 'injectee', + '50-240/4-35?', + 'red' + ] + }; + docs[5] = { + _keywords: [ + 'bar', + 'ip2x', + 'boite', + '6790179', + 'coulee', + '240', + 'branch', + 'sdc', + '50-240/4-35?', + 'red', + 'rac' + ] + }; + docs[6] = { + _keywords: [ + 'bar', + 'ip2x', + 'boite', + '6790159', + '240', + 'branch', + 'injectee', + '50-240/4-35?', + 'sdi', + 'red' + ] + }; + docs[7] = { + _keywords: [ + '6000', + 'r-6000', + 'resin', + 'high', + '739680', + 'red', + 'performance', + 'brd', + 'with', + 'ribbon', + 'flanges' + ] + }; + docs[8] = { _keywords: ['804320', 'for', 'paint', 'roads', 'brd', 'red'] }; + docs[9] = { _keywords: ['38mm', 'padlock', 'safety', '813594', 'brd', 'red'] }; + docs[10] = { _keywords: ['114551', 'r6900', 'for', 'red', 'bmp71', 'brd', 'ribbon'] }; + docs[11] = { + _keywords: ['catena', 'diameter', '621482', 'rings', 'brd', 'legend', 'red', '2mm'] + }; + docs[12] = { + _keywords: ['catena', 'diameter', '621491', 'rings', '5mm', 'brd', 'legend', 'red'] + }; + docs[13] = { + _keywords: ['catena', 'diameter', '621499', 'rings', '3mm', 'brd', 'legend', 'red'] + }; + docs[14] = { + _keywords: ['catena', 'diameter', '621508', 'rings', '5mm', 'brd', 'legend', 'red'] + }; + docs[15] = { + _keywords: [ + 'insert', + 'for', + 'cable', + '3mm', + 'carrier', + '621540', + 'blank', + 'brd', + 'ademark', + 'red' + ] + }; + docs[16] = { + _keywords: [ + 'insert', + 'for', + 'cable', + '621544', + '3mm', + 'carrier', + 'brd', + 'ademark', + 'legend', + 'red' + ] + }; + docs[17] = { + _keywords: ['catena', 'diameter', '6mm', '621518', 'rings', 'brd', 'legend', 'red'] + }; + docs[18] = { + _keywords: ['catena', 'diameter', '621455', '8mm', 'rings', 'brd', 'legend', 'red'] + }; + docs[19] = { + _keywords: ['catena', 'diameter', '621464', 'rings', '5mm', 
'brd', 'legend', 'red'] + }; + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + // Insert all the docs + var collection = db.collection('shouldCorrectExecuteExplainHonoringLimit'); + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + collection.createIndex({ _keywords: 1 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + collection + .find({ _keywords: 'red' }) + .limit(10) + .toArray(function (err, result) { + expect(err).to.not.exist; + test.ok(result != null); + + collection + .find({ _keywords: 'red' }, {}) + .limit(10) + .explain(function (err, result) { + expect(err).to.not.exist; + test.ok(result != null); + + done(); + }); + }); + }); + }); + }); + } + }); + + it('shouldNotExplainWhenFalse', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var doc = { name: 'camera', _keywords: ['compact', 'ii2gd', 'led', 'red', 'aet'] }; + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + var collection = db.collection('shouldNotExplainWhenFalse'); + collection.insert(doc, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + collection + .find({ _keywords: 'red' }) + .limit(10) + .toArray(function (err, result) { + expect(err).to.not.exist; + + test.equal('camera', result[0].name); + done(); + }); + }); + }); + } + }); + + it('shouldFailToSetReadPreferenceOnCursor', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: 
{ topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + try { + db.collection('shouldFailToSetReadPreferenceOnCursor') + .find() + .withReadPreference('notsecondary'); + test.ok(false); + } catch (err) {} // eslint-disable-line + + db.collection('shouldFailToSetReadPreferenceOnCursor') + .find() + .withReadPreference('secondary'); + + done(); + }); + } + }); + + it('should allow setting the cursors readConcern through a builder', { + metadata: { requires: { mongodb: '>=3.2' } }, + test: function (done) { + const client = this.configuration.newClient({ monitorCommands: true }); + const events = []; + client.on('commandStarted', event => { + if (event.commandName === 'find') { + events.push(event); + } + }); + const db = client.db(this.configuration.db); + const cursor = db.collection('foo').find().withReadConcern('local'); + expect(cursor).property('readConcern').to.have.property('level').equal('local'); + + cursor.toArray(err => { + expect(err).to.not.exist; + + expect(events).to.have.length(1); + const findCommand = events[0]; + expect(findCommand).nested.property('command.readConcern').to.eql({ level: 'local' }); + client.close(done); + }); + } + }); + + it('shouldNotFailDueToStackOverflowEach', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('shouldNotFailDueToStackOverflowEach', (err, collection) => { + expect(err).to.not.exist; + + var docs = []; + var 
total = 0; + for (var i = 0; i < 30000; i++) docs.push({ a: i }); + var allDocs = []; + var left = 0; + + while (docs.length > 0) { + allDocs.push(docs.splice(0, 1000)); + } + // Get all batches we must insert + left = allDocs.length; + var totalI = 0; + + // Execute inserts + for (i = 0; i < left; i++) { + collection.insert(allDocs.shift(), configuration.writeConcernMax(), function (err, d) { + expect(err).to.not.exist; + + left = left - 1; + totalI = totalI + d.length; + + if (left === 0) { + collection.find({}).forEach( + () => { + total++; + }, + err => { + expect(err).to.not.exist; + expect(total).to.equal(30000); + done(); + } + ); + } + }); + } + }); + }); + } + }); + + it('should not fail due to stack overflow toArray', async function () { + const configuration = this.configuration; + const db = client.db(configuration.db); + const collection = await db.createCollection('shouldNotFailDueToStackOverflowToArray'); + + var docs = Array.from({ length: 30000 }, (_, i) => ({ a: i })); + var allDocs = []; + var left = 0; + + while (docs.length > 0) { + allDocs.push(docs.splice(0, 1000)); + } + // Get all batches we must insert + left = allDocs.length; + var totalI = 0; + var timeout = 0; + + // Execute inserts + for (let i = 0; i < left; i++) { + await sleep(timeout); + + const d = await collection.insert(allDocs.shift()); + left = left - 1; + totalI = totalI + d.length; + + if (left === 0) { + const items = await collection.find({}).toArray(); + expect(items).to.have.a.lengthOf(30000); + } + timeout = timeout + 100; + } + + await client.close(); + }); + + it('should correctly skip and limit', function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + + const db = client.db(configuration.db); + var collection = db.collection('shouldCorrectlySkipAndLimit'); + var docs = []; + for (var i = 0; i < 100; i++) docs.push({ a: i, OrderNumber: i }); + + collection.insert(docs, 
configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + collection + .find({}, { OrderNumber: 1 }) + .skip(10) + .limit(10) + .toArray((err, items) => { + expect(err).to.not.exist; + test.equal(10, items[0].OrderNumber); + + collection + .find({}, { OrderNumber: 1 }) + .skip(10) + .limit(10) + .count() + .then(count => { + test.equal(10, count); + client.close(done); + }); + }); + }); + }); + }); + + it('shouldFailToTailANormalCollection', function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + var collection = db.collection('shouldFailToTailANormalCollection'); + var docs = []; + for (var i = 0; i < 100; i++) docs.push({ a: i, OrderNumber: i }); + + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + const cursor = collection.find({}, { tailable: true }); + cursor.forEach( + () => {}, + err => { + test.ok(err instanceof Error); + test.ok(typeof err.code === 'number'); + + // Close cursor b/c we did not exhaust cursor + cursor.close(); + done(); + } + ); + }); + }); + }); + + it('shouldCorrectlyUseFindAndCursorCount', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + + // DOC_LINE var client = new MongoClient(new Server('localhost', 27017)); + // DOC_START + // Establish connection to db + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + + // Create a lot of documents to insert + var docs = []; + for (var i = 0; i < 100; i++) { + docs.push({ a: i }); + } + + // Create a collection + 
db.createCollection('test_close_function_on_cursor_2', (err, collection) => { + expect(err).to.not.exist; + + // Insert documents into collection + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + const cursor = collection.find({}); + + cursor.count((err, count) => { + expect(err).to.not.exist; + test.equal(100, count); + + done(); + }); + }); + }); + }); + // DOC_END + } + }); + + it('should correctly apply hint to count command for cursor', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { + topology: ['single', 'replicaset', 'sharded'], + mongodb: '>2.5.5' + } + }, + + test: function (done) { + const configuration = this.configuration; + + // DOC_LINE var client = new MongoClient(new Server('localhost', 27017)); + // DOC_START + // Establish connection to db + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + var col = db.collection('count_hint'); + + col.insert([{ i: 1 }, { i: 2 }], { writeConcern: { w: 1 } }, err => { + expect(err).to.not.exist; + + col.createIndex({ i: 1 }, err => { + expect(err).to.not.exist; + + col.find({ i: 1 }, { hint: '_id_' }).count((err, count) => { + expect(err).to.not.exist; + test.equal(1, count); + + col.find({}, { hint: '_id_' }).count((err, count) => { + expect(err).to.not.exist; + test.equal(2, count); + + col.find({ i: 1 }, { hint: 'BAD HINT' }).count(err => { + test.ok(err != null); + + col.createIndex({ x: 1 }, { sparse: true }, err => { + expect(err).to.not.exist; + + col.find({ i: 1 }, { hint: 'x_1' }).count((err, count) => { + expect(err).to.not.exist; + test.equal(0, count); + + col.find({}, { hint: 'i_1' }).count((err, count) => { + expect(err).to.not.exist; + test.equal(2, count); + + done(); + }); + }); + }); + }); + }); + }); + }); + }); + }); + // DOC_END + } + }); + + 
it('Terminate each after first document by returning false', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + + // Create a lot of documents to insert + var docs = []; + for (var i = 0; i < 100; i++) { + docs.push({ a: i }); + } + + // Create a collection + db.createCollection('terminate_each_returning_false', (err, collection) => { + expect(err).to.not.exist; + + // Insert documents into collection + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + var finished = false; + + collection.find({}).forEach( + doc => { + expect(doc).to.exist; + test.equal(finished, false); + finished = true; + + done(); + return false; + }, + err => { + expect(err).to.not.exist; + } + ); + }); + }); + }); + } + }); + + it('Should correctly handle maxTimeMS as part of findOne options', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + var donkey = { + color: 'brown' + }; + + db.collection('donkies').insertOne(donkey, function (err, result) { + expect(err).to.not.exist; + + var query = { _id: result.insertedId }; + var options = { maxTimeMS: 1000 }; + + db.collection('donkies').findOne(query, options, function (err, doc) { + expect(err).to.not.exist; + test.equal('brown', doc.color); + + done(); + 
}); + }); + }); + } + }); + + it('Should correctly handle batchSize of 2', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + const collectionName = 'should_correctly_handle_batchSize_2'; + db.collection(collectionName).insert([{ x: 1 }, { x: 2 }, { x: 3 }], err => { + expect(err).to.not.exist; + + const cursor = db.collection(collectionName).find({}, { batchSize: 2 }); + this.defer(() => cursor.close()); + + cursor.next(err => { + expect(err).to.not.exist; + + cursor.next(err => { + expect(err).to.not.exist; + + cursor.next(err => { + expect(err).to.not.exist; + done(); + }); + }); + }); + }); + }); + } + }); + + it('Should report database name and collection name', { + metadata: { requires: { topology: ['single'] } }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + const cursor = db.collection('myCollection').find({}); + test.equal('myCollection', cursor.namespace.collection); + test.equal('integration_tests', cursor.namespace.db); + + done(); + }); + } + }); + + it('Should correctly execute count on cursor with maxTimeMS', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + + for (var i = 0; i < 1000; i++) { + var d = new Date().getTime() + i * 1000; + docs[i] = { a: i, createdAt: new Date(d) }; + } + + const configuration = 
this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection( + 'Should_correctly_execute_count_on_cursor_2', + function (err, collection) { + expect(err).to.not.exist; + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Create a cursor for the content + var cursor = collection.find({}); + cursor.limit(100); + cursor.skip(10); + cursor.count({ maxTimeMS: 1000 }, err => { + expect(err).to.not.exist; + + // Create a cursor for the content + var cursor = collection.find({}); + cursor.limit(100); + cursor.skip(10); + cursor.maxTimeMS(100); + + cursor.count(err => { + expect(err).to.not.exist; + done(); + }); + }); + }); + } + ); + }); + } + }); + + it('Should correctly execute count on cursor with maxTimeMS set using legacy method', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + + for (var i = 0; i < 1000; i++) { + var d = new Date().getTime() + i * 1000; + docs[i] = { a: i, createdAt: new Date(d) }; + } + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection( + 'Should_correctly_execute_count_on_cursor_3', + function (err, collection) { + expect(err).to.not.exist; + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Create a cursor for the content + var cursor = collection.find({}, { maxTimeMS: 100 }); + cursor.toArray(err => { + expect(err).to.not.exist; + + done(); + }); + }); + } + ); + }); + } + }); + + it('Should correctly apply map to 
toArray', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + + for (var i = 0; i < 1000; i++) { + var d = new Date().getTime() + i * 1000; + docs[i] = { a: i, createdAt: new Date(d) }; + } + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + var collection = db.collection('map_toArray'); + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Create a cursor for the content + var cursor = collection + .find({}) + .map(function () { + return { a: 1 }; + }) + .batchSize(5) + .limit(10); + + cursor.toArray(function (err, docs) { + expect(err).to.not.exist; + test.equal(10, docs.length); + + // Ensure all docs where mapped + docs.forEach(doc => { + expect(doc).property('a').to.equal(1); + }); + + done(); + }); + }); + }); + } + }); + + it('Should correctly apply map to next', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + const docs = []; + for (var i = 0; i < 1000; i++) { + const d = new Date().getTime() + i * 1000; + docs[i] = { a: i, createdAt: new Date(d) }; + } + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + const collection = db.collection('map_next'); + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Create a cursor for the content + const cursor = 
collection + .find({}) + .map(function () { + return { a: 1 }; + }) + .batchSize(5) + .limit(10); + + this.defer(() => cursor.close()); + cursor.next((err, doc) => { + expect(err).to.not.exist; + test.equal(1, doc.a); + done(); + }); + }); + }); + } + }); + + it('Should correctly apply map to each', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + + for (var i = 0; i < 1000; i++) { + var d = new Date().getTime() + i * 1000; + docs[i] = { a: i, createdAt: new Date(d) }; + } + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + const collection = db.collection('map_each'); + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Create a cursor for the content + var cursor = collection + .find({}) + .map(function () { + return { a: 1 }; + }) + .batchSize(5) + .limit(10); + + cursor.forEach( + doc => { + test.equal(1, doc.a); + }, + err => { + expect(err).to.not.exist; + done(); + } + ); + }); + }); + } + }); + + it('Should correctly apply map to forEach', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + + for (var i = 0; i < 1000; i++) { + var d = new Date().getTime() + i * 1000; + docs[i] = { a: i, createdAt: new Date(d) }; + } + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + var collection = 
db.collection('map_forEach'); + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Create a cursor for the content + var cursor = collection + .find({}) + .map(function () { + return { a: 2 }; + }) + .map(function (x) { + return { a: x.a * x.a }; + }) + .batchSize(5) + .limit(10); + + cursor.forEach( + doc => { + test.equal(4, doc.a); + }, + err => { + expect(err).to.not.exist; + done(); + } + ); + }); + }); + } + }); + + it('Should correctly apply multiple uses of map and apply forEach', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + + for (var i = 0; i < 1000; i++) { + var d = new Date().getTime() + i * 1000; + docs[i] = { a: i, createdAt: new Date(d) }; + } + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + var collection = db.collection('map_mapmapforEach'); + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Create a cursor for the content + var cursor = collection + .find({}) + .map(function () { + return { a: 1 }; + }) + .batchSize(5) + .limit(10); + + cursor.forEach( + doc => { + expect(doc).property('a').to.equal(1); + }, + err => { + expect(err).to.not.exist; + done(); + } + ); + }); + }); + } + }); + + it('Should correctly apply skip and limit to large set of documents', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { requires: { topology: ['single', 'replicaset'] } }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => 
{ + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + var collection = db.collection('cursor_limit_skip_correctly'); + + // Insert x number of docs + var ordered = collection.initializeUnorderedBulkOp(); + + for (var i = 0; i < 6000; i++) { + ordered.insert({ a: i }); + } + + ordered.execute({ writeConcern: { w: 1 } }, err => { + expect(err).to.not.exist; + + // Let's attempt to skip and limit + collection + .find({}) + .limit(2016) + .skip(2016) + .toArray(function (err, docs) { + expect(err).to.not.exist; + test.equal(2016, docs.length); + + done(); + }); + }); + }); + } + }); + + it('should tail cursor using maxAwaitTimeMS for 3.2 or higher', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { requires: { topology: ['single'], mongodb: '<7.0.0' } }, + + test: function (done) { + const configuration = this.configuration; + const client = configuration.newClient(); + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + var options = { capped: true, size: 8 }; + db.createCollection( + 'should_await_data_max_awaittime_ms', + options, + function (err, collection) { + expect(err).to.not.exist; + + collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Create cursor with awaitData, and timeout after the period specified + var cursor = collection + .find({}) + .addCursorFlag('tailable', true) + .addCursorFlag('awaitData', true) + .maxAwaitTimeMS(500); + + const s = new Date(); + cursor.forEach( + () => { + setTimeout(() => cursor.close(), 300); + }, + () => { + test.ok(new Date().getTime() - s.getTime() >= 500); + done(); + } + ); + }); + } + ); + }); + } + }); + + it('Should not emit any events after close event emitted due to cursor killed', { + // Add a tag that our runner can 
trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { requires: { topology: ['single', 'replicaset'] } }, + + test: function (done) { + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + var collection = db.collection('cursor_limit_skip_correctly'); + + // Insert x number of docs + var ordered = collection.initializeUnorderedBulkOp(); + + for (var i = 0; i < 100; i++) { + ordered.insert({ a: i }); + } + + ordered.execute({ writeConcern: { w: 1 } }, err => { + expect(err).to.not.exist; + + // Let's attempt to skip and limit + var cursor = collection.find({}).batchSize(10); + const stream = cursor.stream(); + stream.on('data', function () { + stream.destroy(); + }); + + cursor.on('close', function () { + done(); + }); + }); + }); + } + }); + + it('shouldCorrectlyExecuteEnsureIndexWithNoCallback', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + + for (var i = 0; i < 1; i++) { + var d = new Date().getTime() + i * 1000; + docs[i] = { createdAt: new Date(d) }; + } + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection( + 'shouldCorrectlyExecuteEnsureIndexWithNoCallback', + function (err, collection) { + expect(err).to.not.exist; + + // ensure index of createdAt index + collection.createIndex({ createdAt: 1 }, err => { + expect(err).to.not.exist; + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Find with sort + collection + .find() + 
.sort(['createdAt', 'asc']) + .toArray((err, items) => { + expect(err).to.not.exist; + + test.equal(1, items.length); + done(); + }); + }); + }); + } + ); + }); + } + }); + + it('Should correctly execute count on cursor with limit and skip', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + + for (var i = 0; i < 50; i++) { + var d = new Date().getTime() + i * 1000; + docs[i] = { a: i, createdAt: new Date(d) }; + } + + const configuration = this.configuration; + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('negative_batch_size_and_limit_set', (err, collection) => { + expect(err).to.not.exist; + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Create a cursor for the content + var cursor = collection.find({}); + cursor + .limit(100) + .skip(0) + .count(function (err, c) { + expect(err).to.not.exist; + test.equal(50, c); + + var cursor = collection.find({}); + cursor + .limit(100) + .skip(0) + .toArray(err => { + expect(err).to.not.exist; + test.equal(50, c); + + done(); + }); + }); + }); + }); + }); + } + }); + + it('Should correctly handle negative batchSize and set the limit', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var docs = []; + const configuration = this.configuration; + + for (var i = 0; i < 50; i++) { + var d = new Date().getTime() + i * 1000; + docs[i] = { a: i, createdAt: new Date(d) }; + } + + client.connect((err, client) => { + expect(err).to.not.exist; + 
this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection( + 'Should_correctly_execute_count_on_cursor_1_', + function (err, collection) { + expect(err).to.not.exist; + + // insert all docs + collection.insert(docs, configuration.writeConcernMax(), err => { + expect(err).to.not.exist; + + // Create a cursor for the content + var cursor = collection.find({}); + cursor.batchSize(-10).next(err => { + expect(err).to.not.exist; + test.ok(cursor.id.equals(BSON.Long.ZERO)); + + done(); + }); + }); + } + ); + }); + } + }); + + it('Correctly decorate the cursor count command with skip, limit, hint, readConcern', { + // Add a tag that our runner can trigger on + // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var started = []; + const configuration = this.configuration; + const client = configuration.newClient(configuration.writeConcernMax(), { + maxPoolSize: 1, + monitorCommands: true + }); + client.on('commandStarted', function (event) { + if (event.commandName === 'count') started.push(event); + }); + + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.collection('cursor_count_test', { readConcern: { level: 'local' } }) + .find({ project: '123' }) + .limit(5) + .skip(5) + .hint({ project: 1 }) + .count(err => { + expect(err).to.not.exist; + test.equal(1, started.length); + if (started[0].command.readConcern) + test.deepEqual({ level: 'local' }, started[0].command.readConcern); + test.deepEqual({ project: 1 }, started[0].command.hint); + test.equal(5, started[0].command.skip); + test.equal(5, started[0].command.limit); + + done(); + }); + }); + } + }); + + it.skip('Correctly decorate the collection count command with skip, limit, hint, readConcern', { + // Add a tag that our runner can trigger on 
+ // in this case we are setting that node needs to be higher than 0.10.X to run + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded'] } + }, + + test: function (done) { + var started = []; + + const configuration = this.configuration; + client.on('commandStarted', function (event) { + if (event.commandName === 'count') started.push(event); + }); + + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.collection('cursor_count_test1', { readConcern: { level: 'local' } }).count( + { + project: '123' + }, + { + readConcern: { level: 'local' }, + limit: 5, + skip: 5, + hint: { project: 1 } + }, + err => { + expect(err).to.not.exist; + test.equal(1, started.length); + if (started[0].command.readConcern) + test.deepEqual({ level: 'local' }, started[0].command.readConcern); + test.deepEqual({ project: 1 }, started[0].command.hint); + test.equal(5, started[0].command.skip); + test.equal(5, started[0].command.limit); + + done(); + } + ); + }); + } + }); + + // NOTE: should we allow users to explicitly `kill` a cursor anymore? 
+ it.skip('Should properly kill a cursor', { + metadata: { + requires: { + topology: ['single', 'replicaset', 'sharded'], + mongodb: '>=3.2.0' + } + }, + + test: function () { + // Load up the documents + const docs = []; + for (let i = 0; i < 1000; i += 1) { + docs.push({ + a: i + }); + } + + const configuration = this.configuration; + + let cleanup = () => {}; + let caughtError = undefined; + + return ( + client + .connect() + .then(client => { + this.defer(() => client.close()); + const db = client.db(configuration.db); + const collection = db.collection('cursorkilltest1'); + + // Insert 1000 documents + return collection.insert(docs).then(() => { + // Generate cursor for find operation + const cursor = collection.find({}); + this.defer(() => cursor.close()); + + // Iterate cursor past first element + return cursor + .next() + .then(() => cursor.next()) + .then(() => { + // Confirm that cursorId is non-zero + const longId = cursor.id; + expect(longId).to.be.an('object'); + expect(Object.getPrototypeOf(longId)).to.haveOwnProperty('_bsontype', 'Long'); + const id = longId.toNumber(); + + expect(id).to.not.equal(0); + + // Kill cursor + return new Promise((resolve, reject) => + cursor.kill((err, r) => (err ? reject(err) : resolve(r))) + ).then(response => { + // sharded clusters will return a long, single return integers + if ( + response && + response.cursorsKilled && + Array.isArray(response.cursorsKilled) + ) { + response.cursorsKilled = response.cursorsKilled.map(id => + typeof id === 'number' ? BSON.Long.fromNumber(id) : id + ); + } + + expect(response.ok).to.equal(1); + expect(response.cursorsKilled[0].equals(longId)).to.be.ok; + }); + }); + }); + }) + + // Clean up. 
Make sure that even in case of error, we still always clean up connection + .catch(e => (caughtError = e)) + .then(cleanup) + .then(() => { + if (caughtError) { + throw caughtError; + } + }) + ); + } + }); + + it('should return implicit session to pool when client-side cursor exhausts results on initial query', async function () { + const configuration = this.configuration; + const client = configuration.newClient(); + + await client.connect(); + const db = client.db(configuration.db); + const collection = db.collection('cursor_session_tests'); + + await collection.insertMany([{ a: 1, b: 2 }]); + const cursor = collection.find({}); + + await cursor.next(); // implicit close, cursor is exhausted + expect(client.s.activeSessions.size).to.equal(0); + await cursor.close(); + await client.close(); + }); + + it('should return implicit session to pool when client-side cursor exhausts results after a getMore', async function () { + const db = client.db(this.configuration.db); + const collection = db.collection('cursor_session_tests2'); + + const docs = [ + { a: 1, b: 2 }, + { a: 3, b: 4 }, + { a: 5, b: 6 }, + { a: 7, b: 8 }, + { a: 9, b: 10 } + ]; + + await collection.insertMany(docs); + + const cursor = await collection.find({}, { batchSize: 3 }); + for (let i = 0; i < 3; ++i) { + await cursor.next(); + expect(client.s.activeSessions.size).to.equal(1); + } + + await cursor.next(); + expect(client.s.activeSessions.size, 'session not checked in after cursor exhausted').to.equal( + 0 + ); + + await cursor.close(); + }); + + describe('#clone', function () { + let client; + let db; + let collection; + + beforeEach(function () { + client = this.configuration.newClient({ w: 1 }); + + return client.connect().then(client => { + db = client.db(this.configuration.db); + collection = db.collection('test_coll'); + }); + }); + + afterEach(function () { + return client.close(); + }); + + context('when executing on a find cursor', function () { + it('removes the existing session from the 
cloned cursor', async function () { + const docs = [{ name: 'test1' }, { name: 'test2' }]; + await collection.insertMany(docs); + + const cursor = collection.find({}, { batchSize: 1 }); + try { + const doc = await cursor.next(); + expect(doc).to.exist; + + const clonedCursor = cursor.clone(); + expect(clonedCursor.session).to.be.null; + } finally { + await cursor.close(); + } + }); + }); + + context('when executing on an aggregation cursor', function () { + it('removes the existing session from the cloned cursor', async function () { + const docs = [{ name: 'test1' }, { name: 'test2' }]; + await collection.insertMany(docs); + + const cursor = collection.aggregate([{ $match: {} }], { batchSize: 1 }); + try { + const doc = await cursor.next(); + expect(doc).to.exist; + + const clonedCursor = cursor.clone(); + expect(clonedCursor.session).to.be.null; + } finally { + await cursor.close(); + } + }); + }); + }); + + describe('Cursor forEach Error propagation', function () { + let configuration; + let client; + let cursor; + let collection; + + beforeEach(async function () { + configuration = this.configuration; + client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); + await client.connect().catch(() => { + expect.fail('Failed to connect to client'); + }); + collection = client.db(configuration.db).collection('cursor_session_tests2'); + }); + + afterEach(async function () { + await cursor.close(); + await client.close(); + }); + + // NODE-2035 + it('should propagate error when exceptions are thrown from an awaited forEach call', async function () { + const docs = [{ unique_key_2035: 1 }, { unique_key_2035: 2 }, { unique_key_2035: 3 }]; + await collection.insertMany(docs).catch(() => { + expect.fail('Failed to insert documents'); + }); + cursor = collection.find({ + unique_key_2035: { + $exists: true + } + }); + await cursor + .forEach(() => { + throw new Error('FAILURE IN FOREACH CALL'); + }) + .then(() => { + expect.fail('Error in forEach call not caught'); + 
}) + .catch(err => { + expect(err.message).to.deep.equal('FAILURE IN FOREACH CALL'); + }); + }); + }); + + it('should return a promise when no callback supplied to forEach method', function () { + const configuration = this.configuration; + const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); + + return client.connect().then(() => { + this.defer(() => client.close()); + + const db = client.db(configuration.db); + const collection = db.collection('cursor_session_tests2'); + const cursor = collection.find(); + this.defer(() => cursor.close()); + + const promise = cursor.forEach(() => {}); + expect(promise).to.exist.and.to.be.an.instanceof(Promise); + return promise; + }); + }); + + it('should return false when exhausted and hasNext called more than once', function (done) { + const configuration = this.configuration; + const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); + + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + db.createCollection('cursor_hasNext_test').then(() => { + const cursor = db.collection('cursor_hasNext_test').find(); + this.defer(() => cursor.close()); + + cursor + .hasNext() + .then(val1 => { + expect(val1).to.equal(false); + return cursor.hasNext(); + }) + .then(val2 => { + expect(val2).to.equal(false); + done(); + }); + }); + }); + }); + + const testTransformStream = (config, _done) => { + const client = config.client; + const configuration = config.configuration; + const collectionName = config.collectionName; + const transformFunc = config.transformFunc; + const expectedSet = config.expectedSet; + + let cursor; + const done = err => cursor.close(err2 => client.close(err3 => _done(err || err2 || err3))); + + client.connect((err, client) => { + expect(err).to.not.exist; + + const db = client.db(configuration.db); + let collection; + const docs = [ + { _id: 0, a: { b: 1, c: 0 } }, + { _id: 1, a: { b: 1, c: 0 } }, + { 
_id: 2, a: { b: 1, c: 0 } } + ]; + const resultSet = new Set(); + Promise.resolve() + .then(() => db.createCollection(collectionName)) + .then(() => (collection = db.collection(collectionName))) + .then(() => collection.insertMany(docs)) + .then(() => { + cursor = collection.find(); + return cursor.stream().map(transformFunc ?? (doc => doc)); + }) + .then(stream => { + stream.on('data', function (doc) { + resultSet.add(doc); + }); + + stream.once('end', function () { + expect(resultSet).to.deep.equal(expectedSet); + done(); + }); + + stream.once('error', e => { + done(e); + }); + }) + .catch(e => done(e)); + }); + }; + + it('stream should apply the supplied transformation function to each document in the stream', function (done) { + const configuration = this.configuration; + const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); + const expectedDocs = [ + { _id: 0, b: 1, c: 0 }, + { _id: 1, b: 1, c: 0 }, + { _id: 2, b: 1, c: 0 } + ]; + const config = { + client: client, + configuration: configuration, + collectionName: 'stream-test-transform', + transformFunc: doc => ({ _id: doc._id, b: doc.a.b, c: doc.a.c }), + expectedSet: new Set(expectedDocs) + }; + + testTransformStream(config, done); + }); + + it('stream should return a stream of unmodified docs if no transform function applied', function (done) { + const configuration = this.configuration; + const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); + const expectedDocs = [ + { _id: 0, a: { b: 1, c: 0 } }, + { _id: 1, a: { b: 1, c: 0 } }, + { _id: 2, a: { b: 1, c: 0 } } + ]; + const config = { + client: client, + configuration: configuration, + collectionName: 'transformStream-test-notransform', + transformFunc: null, + expectedSet: new Set(expectedDocs) + }; + + testTransformStream(config, done); + }); + + it.skip('should apply parent read preference to count command', function (done) { + // NOTE: this test is skipped because mongo orchestration does not test sharded clusters + // 
with secondaries. This behavior should be unit tested + + const configuration = this.configuration; + const client = configuration.newClient( + { w: 1, readPreference: ReadPreference.SECONDARY }, + { maxPoolSize: 1, connectWithNoPrimary: true } + ); + + client.connect((err, client) => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const db = client.db(configuration.db); + let collection, cursor, spy; + const close = e => cursor.close(() => client.close(() => done(e))); + + Promise.resolve() + .then(() => new Promise(resolve => setTimeout(() => resolve(), 500))) + .then(() => db.createCollection('test_count_readPreference')) + .then(() => (collection = db.collection('test_count_readPreference'))) + .then(() => collection.find()) + .then(_cursor => (cursor = _cursor)) + .then(() => (spy = sinon.spy(cursor.topology, 'command'))) + .then(() => cursor.count()) + .then(() => + expect(spy.firstCall.args[2]) + .to.have.nested.property('readPreference.mode') + .that.equals('secondary') + ) + .then(() => close()) + .catch(e => close(e)); + }); + }); + + it('should not consume first document on hasNext when streaming', function (done) { + const configuration = this.configuration; + const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); + + client.connect(err => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const collection = client.db().collection('documents'); + collection.drop(() => { + const docs = [{ a: 1 }, { a: 2 }, { a: 3 }]; + collection.insertMany(docs, err => { + expect(err).to.not.exist; + + const cursor = collection.find({}, { sort: { a: 1 } }); + cursor.hasNext((err, hasNext) => { + expect(err).to.not.exist; + expect(hasNext).to.be.true; + + const collected = []; + const stream = new Writable({ + objectMode: true, + write: (chunk, encoding, next) => { + collected.push(chunk); + next(undefined, chunk); + } + }); + + const cursorStream = cursor.stream(); + + cursorStream.on('end', () => { + 
expect(collected).to.have.length(3); + expect(collected).to.eql(docs); + done(); + }); + + cursorStream.pipe(stream); + }); + }); + }); + }); + }); + + describe('transforms', function () { + it('should correctly apply map transform to cursor as readable stream', function (done) { + const configuration = this.configuration; + const client = configuration.newClient(); + client.connect(err => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const docs = 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').map(x => ({ name: x })); + const coll = client.db(configuration.db).collection('cursor_stream_mapping'); + coll.insertMany(docs, err => { + expect(err).to.not.exist; + + const bag = []; + const stream = coll + .find() + .project({ _id: 0, name: 1 }) + .map(doc => ({ mapped: doc })) + .stream() + .on('data', doc => bag.push(doc)); + + stream.on('error', done).on('end', () => { + expect(bag.map(x => x.mapped)).to.eql(docs.map(x => ({ name: x.name }))); + done(); + }); + }); + }); + }); + + it('should correctly apply map transform when converting cursor to array', function (done) { + const configuration = this.configuration; + const client = configuration.newClient(); + client.connect(err => { + expect(err).to.not.exist; + this.defer(() => client.close()); + + const docs = 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').map(x => ({ name: x })); + const coll = client.db(configuration.db).collection('cursor_toArray_mapping'); + coll.insertMany(docs, err => { + expect(err).to.not.exist; + + coll + .find() + .project({ _id: 0, name: 1 }) + .map(doc => ({ mapped: doc })) + .toArray((err, mappedDocs) => { + expect(err).to.not.exist; + expect(mappedDocs.map(x => x.mapped)).to.eql(docs.map(x => ({ name: x.name }))); + done(); + }); + }); + }); + }); + }); + + context('sort', function () { + const findSort = (input, output) => + function (done) { + const client = this.configuration.newClient({ monitorCommands: true }); + const events = []; + 
client.on('commandStarted', event => { + if (event.commandName === 'find') { + events.push(event); + } + }); + const db = client.db('test'); + const collection = db.collection('test_sort_dos'); + const cursor = collection.find({}, { sort: input }); + cursor.next(err => { + expect(err).to.not.exist; + expect(events[0].command.sort).to.be.instanceOf(Map); + expect(Array.from(events[0].command.sort)).to.deep.equal(Array.from(output)); + client.close(done); + }); + }; + + const cursorSort = (input, output) => + function (done) { + const client = this.configuration.newClient({ monitorCommands: true }); + const events = []; + client.on('commandStarted', event => { + if (event.commandName === 'find') { + events.push(event); + } + }); + const db = client.db('test'); + const collection = db.collection('test_sort_dos'); + const cursor = collection.find({}).sort(input); + cursor.next(err => { + expect(err).to.not.exist; + expect(events[0].command.sort).to.be.instanceOf(Map); + expect(Array.from(events[0].command.sort)).to.deep.equal(Array.from(output)); + client.close(done); + }); + }; + + it('should use find options object', findSort({ alpha: 1 }, new Map([['alpha', 1]]))); + it('should use find options string', findSort('alpha', new Map([['alpha', 1]]))); + it('should use find options shallow array', findSort(['alpha', 1], new Map([['alpha', 1]]))); + it('should use find options deep array', findSort([['alpha', 1]], new Map([['alpha', 1]]))); + + it('should use cursor.sort object', cursorSort({ alpha: 1 }, new Map([['alpha', 1]]))); + it('should use cursor.sort string', cursorSort('alpha', new Map([['alpha', 1]]))); + it('should use cursor.sort shallow array', cursorSort(['alpha', 1], new Map([['alpha', 1]]))); + it('should use cursor.sort deep array', cursorSort([['alpha', 1]], new Map([['alpha', 1]]))); + + it('formatSort - one key', () => { + // TODO (NODE-3236): These are unit tests for a standalone function and should be moved out of the cursor context file + 
expect(formatSort('alpha')).to.deep.equal(new Map([['alpha', 1]])); + expect(formatSort(['alpha'])).to.deep.equal(new Map([['alpha', 1]])); + expect(formatSort('alpha', 1)).to.deep.equal(new Map([['alpha', 1]])); + expect(formatSort('alpha', 'asc')).to.deep.equal(new Map([['alpha', 1]])); + expect(formatSort([['alpha', 'asc']])).to.deep.equal(new Map([['alpha', 1]])); + expect(formatSort('alpha', 'ascending')).to.deep.equal(new Map([['alpha', 1]])); + expect(formatSort({ alpha: 1 })).to.deep.equal(new Map([['alpha', 1]])); + expect(formatSort('beta')).to.deep.equal(new Map([['beta', 1]])); + expect(formatSort(['beta'])).to.deep.equal(new Map([['beta', 1]])); + expect(formatSort('beta', -1)).to.deep.equal(new Map([['beta', -1]])); + expect(formatSort('beta', 'desc')).to.deep.equal(new Map([['beta', -1]])); + expect(formatSort('beta', 'descending')).to.deep.equal(new Map([['beta', -1]])); + expect(formatSort({ beta: -1 })).to.deep.equal(new Map([['beta', -1]])); + expect(formatSort({ alpha: { $meta: 'hi' } })).to.deep.equal( + new Map([['alpha', { $meta: 'hi' }]]) + ); + }); + + it('formatSort - multi key', () => { + expect(formatSort(['alpha', 'beta'])).to.deep.equal( + new Map([ + ['alpha', 1], + ['beta', 1] + ]) + ); + expect(formatSort({ alpha: 1, beta: 1 })).to.deep.equal( + new Map([ + ['alpha', 1], + ['beta', 1] + ]) + ); + expect( + formatSort([ + ['alpha', 'asc'], + ['beta', 'ascending'] + ]) + ).to.deep.equal( + new Map([ + ['alpha', 1], + ['beta', 1] + ]) + ); + expect( + formatSort( + new Map([ + ['alpha', 'asc'], + ['beta', 'ascending'] + ]) + ) + ).to.deep.equal( + new Map([ + ['alpha', 1], + ['beta', 1] + ]) + ); + expect( + formatSort([ + ['3', 'asc'], + ['1', 'ascending'] + ]) + ).to.deep.equal( + new Map([ + ['3', 1], + ['1', 1] + ]) + ); + expect(formatSort({ alpha: { $meta: 'hi' }, beta: 'ascending' })).to.deep.equal( + new Map([ + ['alpha', { $meta: 'hi' }], + ['beta', 1] + ]) + ); + }); + + it('should use allowDiskUse option on sort', { + 
metadata: { requires: { mongodb: '>=4.4' } }, + test: async function () { + const events = []; + client.on('commandStarted', event => { + if (event.commandName === 'find') { + events.push(event); + } + }); + const db = client.db('test'); + const collection = db.collection('test_sort_allow_disk_use'); + const cursor = collection.find({}).sort(['alpha', 1]).allowDiskUse(); + await cursor.next(); + const { command } = events.shift(); + expect(command.sort).to.deep.equal(new Map([['alpha', 1]])); + expect(command.allowDiskUse).to.be.true; + } + }); + + it('should error if allowDiskUse option used without sort', { + metadata: { requires: { mongodb: '>=4.4' } }, + test: async function () { + const client = this.configuration.newClient(); + const db = client.db('test'); + const collection = db.collection('test_sort_allow_disk_use'); + expect(() => collection.find({}).allowDiskUse()).to.throw( + /Option "allowDiskUse" requires a sort specification/ + ); + await client.close(); + } + }); + }); +}); diff --git a/test/integration/node-specific/bson-options/ignore_undefined.test.js b/test/integration/node-specific/bson-options/ignore_undefined.test.js new file mode 100644 index 00000000000..e69de29bb2d diff --git a/test/integration/node-specific/bson-options/promote_buffers.test.js b/test/integration/node-specific/bson-options/promote_buffers.test.js new file mode 100644 index 00000000000..e69de29bb2d diff --git a/test/integration/node-specific/bson-options/promote_values.test.js b/test/integration/node-specific/bson-options/promote_values.test.js new file mode 100644 index 00000000000..e69de29bb2d diff --git a/test/integration/node-specific/cursor_stream.test.js b/test/integration/node-specific/cursor_stream.test.js new file mode 100644 index 00000000000..5325fad75a5 --- /dev/null +++ b/test/integration/node-specific/cursor_stream.test.js @@ -0,0 +1,354 @@ +'use strict'; +const { expect } = require('chai'); +const { Binary } = require('../../mongodb'); +const { setTimeout, 
setImmediate } = require('timers'); + +describe.skip('Cursor Streams', function () { + let client; + + beforeEach(async function () { + client = this.configuration.newClient(); + }); + + afterEach(async function () { + await client.close(); + }); + + it('should stream documents with pause and resume for fetching', { + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } + }, + + test: function (done) { + var self = this; + var docs = []; + var j = 0; + + for (var i = 0; i < 3000; i++) { + docs.push({ a: i }); + } + + var allDocs = []; + while (docs.length > 0) { + allDocs.push(docs.splice(0, 1000)); + } + + var client = self.configuration.newClient(self.configuration.writeConcernMax(), { + maxPoolSize: 1 + }); + + client.connect(function (err, client) { + var db = client.db(self.configuration.db); + db.createCollection( + 'test_streaming_function_with_limit_for_fetching2', + function (err, collection) { + var left = allDocs.length; + for (var i = 0; i < allDocs.length; i++) { + collection.insert(allDocs[i], { writeConcern: { w: 1 } }, function (err) { + expect(err).to.not.exist; + + left = left - 1; + + if (left === 0) { + // Perform a find to get a cursor + var stream = collection.find({}).stream(); + var data = []; + + // For each data item + stream.on('data', function () { + data.push(1); + j = j + 1; + stream.pause(); + + collection.findOne({}, function (err) { + expect(err).to.not.exist; + stream.resume(); + }); + }); + + // When the stream is done + stream.on('end', function () { + setTimeout(() => { + let err; + try { + expect(data).to.have.length(3000); + } catch (e) { + err = e; + } + + client.close(() => done(err)); + }, 1000); + }); + } + }); + } + } + ); + }); + } + }); + + it('should stream 10K documents', { + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } + }, + + test: function (done) { + var self = this; + var docs = []; + + for (var i = 0; i < 
10000; i++) { + docs.push({ a: i, bin: new Binary(Buffer.alloc(256)) }); + } + + var j = 0; + + var allDocs = []; + while (docs.length > 0) { + allDocs.push(docs.splice(0, 1000)); + } + + var client = self.configuration.newClient(self.configuration.writeConcernMax(), { + maxPoolSize: 1 + }); + + client.connect(function (err, client) { + var db = client.db(self.configuration.db); + db.createCollection( + 'test_streaming_function_with_limit_for_fetching_2', + function (err, collection) { + var left = allDocs.length; + for (var i = 0; i < allDocs.length; i++) { + collection.insert(allDocs[i], { writeConcern: { w: 1 } }, function (err) { + expect(err).to.not.exist; + left = left - 1; + + if (left === 0) { + // Perform a find to get a cursor + var stream = collection.find({}).stream(); + var data = []; + + // For each data item + stream.on('data', function () { + j = j + 1; + stream.pause(); + data.push(1); + + collection.findOne({}, function (err) { + expect(err).to.not.exist; + stream.resume(); + }); + }); + + // When the stream is done + stream.on('end', function () { + setTimeout(() => { + let err; + try { + expect(data).to.have.length(10000); + } catch (e) { + err = e; + } + + client.close(err2 => done(err || err2)); + }, 1000); + }); + } + }); + } + } + ); + }); + } + }); + + it('should trigger massive amount of getMores', { + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } + }, + + test: function (done) { + var self = this; + var docs = []; + var counter = 0; + var counter2 = 0; + + for (var i = 0; i < 1000; i++) { + docs.push({ a: i, bin: new Binary(Buffer.alloc(256)) }); + } + + var client = self.configuration.newClient(self.configuration.writeConcernMax(), { + maxPoolSize: 1 + }); + + client.connect(function (err, client) { + var db = client.db(self.configuration.db); + db.createCollection( + 'test_streaming_function_with_limit_for_fetching_3', + function (err, collection) { + collection.insert(docs, { 
writeConcern: { w: 1 } }, function (err) { + expect(err).to.not.exist; + + // Perform a find to get a cursor + var stream = collection.find({}).stream(); + + // For each data item + stream.on('data', function () { + counter++; + stream.pause(); + stream.resume(); + counter2++; + }); + + // When the stream is done + stream.on('end', function () { + expect(counter).to.equal(1000); + expect(counter2).to.equal(1000); + client.close(done); + }); + }); + } + ); + }); + } + }); + + it('should stream documents across getMore command and count correctly', async function () { + if (process.platform === 'darwin') { + this.skipReason = 'TODO(NODE-3819): Unskip flaky MacOS tests.'; + return this.skip(); + } + + const db = client.db(); + const collection = db.collection('streaming'); + const updateCollection = db.collection('update_within_streaming'); + + await collection.drop().catch(() => null); + await updateCollection.drop().catch(() => null); + + const docs = Array.from({ length: 10 }, (_, i) => ({ + _id: i, + b: new Binary(Buffer.alloc(1024)) + })); + + await collection.insertMany(docs); + // Set the batchSize to be a 5th of the total docCount to make getMores happen + const stream = collection.find({}, { batchSize: 2 }).stream(); + + let done; + const end = new Promise((resolve, reject) => { + done = error => (error != null ? 
reject(error) : resolve()); + }); + + stream.on('end', () => { + updateCollection + .findOne({ id: 1 }) + .then(function (doc) { + expect(doc.count).to.equal(9); + done(); + }) + .catch(done) + .finally(() => client.close()); + }); + + let docCount = 0; + stream.on('data', data => { + stream.pause(); + try { + expect(data).to.have.property('_id', docCount); + } catch (assertionError) { + return done(assertionError); + } + + if (docCount++ === docs.length - 1) { + stream.resume(); + return; + } + + updateCollection + .updateMany({ id: 1 }, { $inc: { count: 1 } }, { writeConcern: { w: 1 }, upsert: true }) + .then(() => { + stream.resume(); + }) + .catch(done); + }); + + return end; + }); + + it('should correctly error out stream', { + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } + }, + + test: function (done) { + var self = this; + var client = self.configuration.newClient(self.configuration.writeConcernMax(), { + maxPoolSize: 1 + }); + + client.connect((err, client) => { + const db = client.db(self.configuration.db); + const cursor = db.collection('myCollection').find({ + timestamp: { $ltx: '1111' } // Error in query. 
+ }); + + let error; + const stream = cursor.stream(); + stream.on('error', err => (error = err)); + cursor.on('close', function () { + setImmediate(() => { + expect(error).to.exist; + client.close(done); + }); + }); + + stream.pipe(process.stdout); + }); + } + }); + + it('should correctly stream cursor after stream', { + metadata: { + requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } + }, + + test: function (done) { + var self = this; + var client = self.configuration.newClient(self.configuration.writeConcernMax(), { + maxPoolSize: 1 + }); + + client.connect(function (err, client) { + var db = client.db(self.configuration.db); + var docs = []; + var received = []; + + for (var i = 0; i < 1000; i++) { + docs.push({ a: i, field: 'hello world' }); + } + + db.collection('cursor_sort_stream').insertMany(docs, function (err) { + expect(err).to.not.exist; + + var cursor = db + .collection('cursor_sort_stream') + .find({}) + .project({ a: 1 }) + .sort({ a: -1 }); + const stream = cursor.stream(); + + stream.on('end', function () { + expect(received).to.have.length(1000); + + client.close(done); + }); + + stream.on('data', function (d) { + received.push(d); + }); + }); + }); + } + }); +}); From 1e9655d7635520d6278f3649464be3f936b6383e Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Fri, 17 Oct 2025 13:54:13 +0200 Subject: [PATCH 37/41] test(NODE-7179): import classes instead of types --- .../client_side_operations_timeout.unit.test.ts | 15 ++++----------- .../initial_dns_seedlist_discovery.prose.test.ts | 6 +++++- .../node-specific/convert_socket_errors.test.ts | 3 ++- .../server_discover_and_monitoring.test.ts | 3 ++- 4 files changed, 13 insertions(+), 14 deletions(-) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 9a62c26a823..4f2a38de73b 100644 --- 
a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -10,18 +10,11 @@ import { setTimeout } from 'timers'; import { TLSSocket } from 'tls'; import { promisify } from 'util'; -import { - Connection, - ConnectionPool, - CSOTTimeoutContext, - type MongoClient, - MongoOperationTimeoutError, - ObjectId, - Timeout, - TimeoutContext, - Topology -} from '../../../src'; +import { Connection, type MongoClient, MongoOperationTimeoutError, ObjectId } from '../../../src'; import { StateMachine } from '../../../src/client-side-encryption/state_machine'; +import { ConnectionPool } from '../../../src/cmap/connection_pool'; +import { Topology } from '../../../src/sdam/topology'; +import { CSOTTimeoutContext, Timeout, TimeoutContext } from '../../../src/timeout'; import { measureDuration, sleep } from '../../tools/utils'; import { createTimerSandbox } from '../../unit/timer_sandbox'; diff --git a/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.prose.test.ts b/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.prose.test.ts index 5516760417f..c36e675ae58 100644 --- a/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.prose.test.ts +++ b/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.prose.test.ts @@ -2,7 +2,11 @@ import { expect } from 'chai'; import * as dns from 'dns'; import * as sinon from 'sinon'; -import { ConnectionPool, MongoAPIError, Server, ServerDescription, Topology } from '../../../src'; +import { ConnectionPool } from '../../../src/cmap/connection_pool'; +import { MongoAPIError } from '../../../src/error'; +import { Server } from '../../../src/sdam/server'; +import { ServerDescription } from '../../../src/sdam/server_description'; +import { Topology } from '../../../src/sdam/topology'; import { 
topologyWithPlaceholderClient } from '../../tools/utils'; describe('Initial DNS Seedlist Discovery (Prose Tests)', () => { diff --git a/test/integration/node-specific/convert_socket_errors.test.ts b/test/integration/node-specific/convert_socket_errors.test.ts index e3959023b70..04c2a3720ed 100644 --- a/test/integration/node-specific/convert_socket_errors.test.ts +++ b/test/integration/node-specific/convert_socket_errors.test.ts @@ -3,7 +3,8 @@ import { Duplex } from 'node:stream'; import { expect } from 'chai'; import * as sinon from 'sinon'; -import { Connection, type MongoClient, MongoNetworkError } from '../../../src'; +import { type MongoClient, MongoNetworkError } from '../../../src'; +import { Connection } from '../../../src/cmap/connection'; import { ns } from '../../../src/utils'; import { clearFailPoint, configureFailPoint } from '../../tools/utils'; diff --git a/test/integration/server-discovery-and-monitoring/server_discover_and_monitoring.test.ts b/test/integration/server-discovery-and-monitoring/server_discover_and_monitoring.test.ts index cceca051f5e..3299bcde543 100644 --- a/test/integration/server-discovery-and-monitoring/server_discover_and_monitoring.test.ts +++ b/test/integration/server-discovery-and-monitoring/server_discover_and_monitoring.test.ts @@ -3,7 +3,8 @@ import { setTimeout } from 'node:timers/promises'; import { expect } from 'chai'; import * as sinon from 'sinon'; -import { Connection, type MongoClient, type ServerHeartbeatSucceededEvent } from '../../../src'; +import { type MongoClient, type ServerHeartbeatSucceededEvent } from '../../../src'; +import { Connection } from '../../../src/cmap/connection'; import { promiseWithResolvers } from '../../../src/utils'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; From 496acec43256f68ee3446b6daced45b55a16f351 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Fri, 17 Oct 2025 14:19:20 +0200 Subject: [PATCH 38/41] skip 
operations --- .../client_side_operations_timeout.unit.test.ts | 3 ++- test/integration/node-specific/operation_examples.test.ts | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index 4f2a38de73b..def0bd88af8 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -10,8 +10,9 @@ import { setTimeout } from 'timers'; import { TLSSocket } from 'tls'; import { promisify } from 'util'; -import { Connection, type MongoClient, MongoOperationTimeoutError, ObjectId } from '../../../src'; +import { type MongoClient, MongoOperationTimeoutError, ObjectId } from '../../../src'; import { StateMachine } from '../../../src/client-side-encryption/state_machine'; +import { Connection } from '../../../src/cmap/connection'; import { ConnectionPool } from '../../../src/cmap/connection_pool'; import { Topology } from '../../../src/sdam/topology'; import { CSOTTimeoutContext, Timeout, TimeoutContext } from '../../../src/timeout'; diff --git a/test/integration/node-specific/operation_examples.test.ts b/test/integration/node-specific/operation_examples.test.ts index d8e07af5462..447db2832cd 100644 --- a/test/integration/node-specific/operation_examples.test.ts +++ b/test/integration/node-specific/operation_examples.test.ts @@ -5,7 +5,7 @@ import { enumToString } from '../../../src/utils'; import { sleep as delay } from '../../tools/utils'; import { setupDatabase } from '../shared'; -describe('Operations', function () { +describe.skip('Operations', function () { let client: MongoClient; beforeEach(async function () { From 4c56e6074d3d7e3bc9b87d8ac88de07dbe7d2d9f Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Mon, 20 Oct 2025 15:04:39 
+0200 Subject: [PATCH 39/41] remove TODO(NODE-6599) comments --- .../mongodb-handshake/mongodb-handshake.prose.test.ts | 6 ------ 1 file changed, 6 deletions(-) diff --git a/test/integration/mongodb-handshake/mongodb-handshake.prose.test.ts b/test/integration/mongodb-handshake/mongodb-handshake.prose.test.ts index 55eaee1ec45..a8ac32b99cf 100644 --- a/test/integration/mongodb-handshake/mongodb-handshake.prose.test.ts +++ b/test/integration/mongodb-handshake/mongodb-handshake.prose.test.ts @@ -361,7 +361,6 @@ describe('Client Metadata Update Prose Tests', function () { let initialClientMetadata: ClientMetadata; let updatedClientMetadata: ClientMetadata; - // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests let client: MongoClient; // | Case | Name | Version | Platform | @@ -478,7 +477,6 @@ describe('Client Metadata Update Prose Tests', function () { describe('Test 4: Multiple Metadata Updates with Duplicate Data', function () { let initialClientMetadata: ClientMetadata; let updatedClientMetadata: ClientMetadata; - // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests let client: MongoClient; afterEach(async function () { @@ -571,7 +569,6 @@ describe('Client Metadata Update Prose Tests', function () { describe('Test 5: Metadata is not appended if identical to initial metadata', function () { let initialClientMetadata: ClientMetadata; let updatedClientMetadata: ClientMetadata; - // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests let client: MongoClient; // 1. 
Create a `MongoClient` instance with the following: @@ -649,7 +646,6 @@ describe('Client Metadata Update Prose Tests', function () { describe('Test 6: Metadata is not appended if identical to initial metadata (separated by non-identical metadata)', function () { let clientMetadata: ClientMetadata; let updatedClientMetadata: ClientMetadata; - // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests let client: MongoClient; afterEach(async function () { @@ -741,7 +737,6 @@ describe('Client Metadata Update Prose Tests', function () { describe('Test 7: Empty strings are considered unset when appending duplicate metadata', function () { let initialClientMetadata: ClientMetadata; let updatedClientMetadata: ClientMetadata; - // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests let client: MongoClient; afterEach(async function () { @@ -845,7 +840,6 @@ describe('Client Metadata Update Prose Tests', function () { describe('Test 8: Empty strings are considered unset when appending metadata identical to initial metadata', function () { let initialClientMetadata: ClientMetadata; let updatedClientMetadata: ClientMetadata; - // TODO(NODE-6599): mongodb-legacy adds additional client metadata, breaking these prose tests let client: MongoClient; afterEach(async function () { From 892a2abdef9c00447f05c4441fb5ad35cb7f349e Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Fri, 24 Oct 2025 17:03:08 +0200 Subject: [PATCH 40/41] remove .js files which were migrated to .ts --- test/integration/crud/insert.test.js | 0 test/integration/crud/misc_cursors.test.js | 4104 ----------------- .../bson-options/ignore_undefined.test.js | 0 .../bson-options/promote_buffers.test.js | 0 .../bson-options/promote_values.test.js | 0 .../node-specific/cursor_stream.test.js | 354 -- 6 files changed, 4458 deletions(-) delete mode 100644 test/integration/crud/insert.test.js delete mode 100644 
test/integration/crud/misc_cursors.test.js delete mode 100644 test/integration/node-specific/bson-options/ignore_undefined.test.js delete mode 100644 test/integration/node-specific/bson-options/promote_buffers.test.js delete mode 100644 test/integration/node-specific/bson-options/promote_values.test.js delete mode 100644 test/integration/node-specific/cursor_stream.test.js diff --git a/test/integration/crud/insert.test.js b/test/integration/crud/insert.test.js deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/integration/crud/misc_cursors.test.js b/test/integration/crud/misc_cursors.test.js deleted file mode 100644 index d6492854ce3..00000000000 --- a/test/integration/crud/misc_cursors.test.js +++ /dev/null @@ -1,4104 +0,0 @@ -'use strict'; -const { assert: test, filterForCommands, setupDatabase } = require('../shared'); -const { runLater, sleep } = require('../../tools/utils'); -const fs = require('fs'); -const os = require('os'); -const path = require('path'); -const { expect } = require('chai'); -const BSON = require('bson'); -const sinon = require('sinon'); -const { Writable } = require('stream'); -const { once, on } = require('events'); -const { setTimeout } = require('timers'); -const { ReadPreference } = require('../../mongodb'); -const { ServerType, MongoClientClosedError } = require('../../mongodb'); -const { formatSort } = require('../../mongodb'); - -describe.skip('Cursor', function () { - before(function () { - return setupDatabase(this.configuration, [ - 'cursorkilltest1', - 'cursor_session_tests', - 'cursor_session_tests2' - ]); - }); - - let client; - - beforeEach(async function () { - client = this.configuration.newClient({ maxPoolSize: 1, monitorCommands: true }); - }); - - afterEach(async function () { - await client.close(); - }); - - it('should not throw an error when toArray and forEach are called after cursor is closed', async function () { - const db = client.db(); - - const collection = await 
db.collection('test_to_a'); - await collection.insertMany([{ a: 1 }]); - const cursor = collection.find({}); - - const firstToArray = await cursor.toArray().catch(error => error); - expect(firstToArray).to.be.an('array'); - - expect(cursor.closed).to.be.true; - - const secondToArray = await cursor.toArray().catch(error => error); - expect(secondToArray).to.be.an('array'); - expect(secondToArray).to.have.lengthOf(0); - - const forEachResult = await cursor - .forEach(() => { - expect.fail('should not run forEach on an empty/closed cursor'); - }) - .catch(error => error); - expect(forEachResult).to.be.undefined; - }); - - it('cursor should close after first next operation', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('close_on_next', (err, collection) => { - expect(err).to.not.exist; - - collection.insert( - [{ a: 1 }, { a: 1 }, { a: 1 }], - configuration.writeConcernMax(), - err => { - expect(err).to.not.exist; - - var cursor = collection.find({}); - this.defer(() => cursor.close()); - - cursor.batchSize(2); - cursor.next(err => { - expect(err).to.not.exist; - done(); - }); - } - ); - }); - }); - } - }); - - it('cursor should trigger getMore', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - 
db.createCollection('trigger_get_more', (err, collection) => { - expect(err).to.not.exist; - - collection.insert( - [{ a: 1 }, { a: 1 }, { a: 1 }], - configuration.writeConcernMax(), - err => { - expect(err).to.not.exist; - const cursor = collection.find({}).batchSize(2); - this.defer(() => cursor.close()); - cursor.toArray(err => { - expect(err).to.not.exist; - done(); - }); - } - ); - }); - }); - } - }); - - it('shouldCorrectlyExecuteCursorExplain', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_explain', (err, collection) => { - expect(err).to.not.exist; - - collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - collection.find({ a: 1 }).explain((err, explanation) => { - expect(err).to.not.exist; - expect(explanation).to.exist; - done(); - }); - }); - }); - }); - } - }); - - it('shouldCorrectlyExecuteCursorCount', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_count', (err, collection) => { - expect(err).to.not.exist; - - collection.find().count(err => { - expect(err).to.not.exist; - - function insert(callback) { - var total = 10; - - for (var i = 0; i < 10; i++) { - collection.insert({ x: i }, 
configuration.writeConcernMax(), e => { - expect(e).to.not.exist; - total = total - 1; - if (total === 0) callback(); - }); - } - } - - function finished() { - collection.find().count((err, count) => { - expect(err).to.not.exist; - test.equal(10, count); - test.ok(count.constructor === Number); - - collection.find({}, { limit: 5 }).count((err, count) => { - expect(err).to.not.exist; - test.equal(5, count); - - collection.find({}, { skip: 5 }).count((err, count) => { - expect(err).to.not.exist; - test.equal(5, count); - - db.collection('acollectionthatdoesn').count((err, count) => { - expect(err).to.not.exist; - test.equal(0, count); - - var cursor = collection.find(); - cursor.count((err, count) => { - expect(err).to.not.exist; - test.equal(10, count); - - cursor.forEach( - () => {}, - err => { - expect(err).to.not.exist; - cursor.count((err, count2) => { - expect(err).to.not.exist; - expect(count2).to.equal(10); - expect(count2).to.equal(count); - done(); - }); - } - ); - }); - }); - }); - }); - }); - } - - insert(function () { - finished(); - }); - }); - }); - }); - } - }); - - it('should correctly execute cursor count with secondary readPreference', { - metadata: { requires: { topology: 'replicaset' } }, - async test() { - const bag = []; - client.on('commandStarted', filterForCommands(['count'], bag)); - - const cursor = client - .db() - .collection('countTEST') - .find({ qty: { $gt: 4 } }); - await cursor.count({ readPreference: ReadPreference.SECONDARY }); - - const selectedServerAddress = bag[0].address - .replace('127.0.0.1', 'localhost') - .replace('[::1]', 'localhost'); - const selectedServer = client.topology.description.servers.get(selectedServerAddress); - expect(selectedServer).property('type').to.equal(ServerType.RSSecondary); - } - }); - - it('shouldCorrectlyExecuteCursorCountWithDottedCollectionName', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - 
requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_count.ext', (err, collection) => { - expect(err).to.not.exist; - - collection.find().count(err => { - expect(err).to.not.exist; - - function insert(callback) { - var total = 10; - - for (var i = 0; i < 10; i++) { - collection.insert({ x: i }, configuration.writeConcernMax(), e => { - expect(e).to.not.exist; - total = total - 1; - if (total === 0) callback(); - }); - } - } - - function finished() { - collection.find().count((err, count) => { - expect(err).to.not.exist; - test.equal(10, count); - test.ok(count.constructor === Number); - - collection.find({}, { limit: 5 }).count((err, count) => { - expect(err).to.not.exist; - test.equal(5, count); - - collection.find({}, { skip: 5 }).count((err, count) => { - expect(err).to.not.exist; - test.equal(5, count); - - db.collection('acollectionthatdoesn').count((err, count) => { - expect(err).to.not.exist; - test.equal(0, count); - - var cursor = collection.find(); - cursor.count((err, count) => { - expect(err).to.not.exist; - test.equal(10, count); - - cursor.forEach( - () => {}, - err => { - expect(err).to.not.exist; - cursor.count((err, count2) => { - expect(err).to.not.exist; - expect(count2).to.equal(10); - expect(count2).to.equal(count); - done(); - }); - } - ); - }); - }); - }); - }); - }); - } - - insert(function () { - finished(); - }); - }); - }); - }); - } - }); - - it('shouldThrowErrorOnEachWhenMissingCallback', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - 
client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_each', (err, collection) => { - expect(err).to.not.exist; - function insert(callback) { - var total = 10; - - for (var i = 0; i < 10; i++) { - collection.insert({ x: i }, configuration.writeConcernMax(), e => { - expect(e).to.not.exist; - total = total - 1; - if (total === 0) callback(); - }); - } - } - - function finished() { - const cursor = collection.find(); - - test.throws(function () { - cursor.forEach(); - }); - - done(); - } - - insert(function () { - finished(); - }); - }); - }); - } - }); - - it('shouldCorrectlyHandleLimitOnCursor', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_cursor_limit', (err, collection) => { - function insert(callback) { - var total = 10; - - for (var i = 0; i < 10; i++) { - collection.insert({ x: i }, configuration.writeConcernMax(), e => { - expect(e).to.not.exist; - total = total - 1; - if (total === 0) callback(); - }); - } - } - - function finished() { - collection - .find() - .limit(5) - .toArray((err, items) => { - test.equal(5, items.length); - - // Let's close the db - expect(err).to.not.exist; - done(); - }); - } - - insert(function () { - finished(); - }); - }); - }); - } - }); - - it('shouldCorrectlyHandleNegativeOneLimitOnCursor', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: 
function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_cursor_negative_one_limit', (err, collection) => { - expect(err).to.not.exist; - function insert(callback) { - var total = 10; - - for (var i = 0; i < 10; i++) { - collection.insert({ x: i }, configuration.writeConcernMax(), e => { - expect(e).to.not.exist; - total = total - 1; - if (total === 0) callback(); - }); - } - } - - function finished() { - collection - .find() - .limit(-1) - .toArray((err, items) => { - expect(err).to.not.exist; - test.equal(1, items.length); - - // Let's close the db - done(); - }); - } - - insert(function () { - finished(); - }); - }); - }); - } - }); - - it('shouldCorrectlyHandleAnyNegativeLimitOnCursor', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_cursor_any_negative_limit', (err, collection) => { - expect(err).to.not.exist; - function insert(callback) { - var total = 10; - - for (var i = 0; i < 10; i++) { - collection.insert({ x: i }, configuration.writeConcernMax(), e => { - expect(e).to.not.exist; - total = total - 1; - if (total === 0) callback(); - }); - } - } - - function finished() { - collection - .find() - .limit(-5) - .toArray((err, items) => { - expect(err).to.not.exist; - test.equal(5, items.length); - - // Let's close the db - done(); - }); - } - - insert(function () { - finished(); - }); - }); - }); - } - }); - - it('shouldCorrectlyReturnErrorsOnIllegalLimitValuesNotAnInt', { - // Add a 
tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_limit_exceptions_2', (err, collection) => { - expect(err).to.not.exist; - - collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - const cursor = collection.find(); - this.defer(() => cursor.close()); - - try { - cursor.limit('not-an-integer'); - } catch (err) { - test.equal('Operation "limit" requires an integer', err.message); - } - - done(); - }); - }); - }); - } - }); - - it('shouldCorrectlyReturnErrorsOnIllegalLimitValuesIsClosedWithinNext', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_limit_exceptions', (err, collection) => { - expect(err).to.not.exist; - - collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - const cursor = collection.find(); - this.defer(() => cursor.close()); - - cursor.next(err => { - expect(err).to.not.exist; - expect(() => { - cursor.limit(1); - }).to.throw(/Cursor is already initialized/); - - done(); - }); - }); - }); - }); - } - }); - - // NOTE: who cares what you set when the cursor is closed? 
- it.skip('shouldCorrectlyReturnErrorsOnIllegalLimitValuesIsClosedWithinClose', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_limit_exceptions_1', (err, collection) => { - expect(err).to.not.exist; - - collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - const cursor = collection.find(); - cursor.close(err => { - expect(err).to.not.exist; - expect(() => { - cursor.limit(1); - }).to.throw(/not extensible/); - - done(); - }); - }); - }); - }); - } - }); - - it('shouldCorrectlySkipRecordsOnCursor', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_skip', (err, collection) => { - expect(err).to.not.exist; - - const insert = callback => { - var total = 10; - for (var i = 0; i < 10; i++) { - collection.insert({ x: i }, configuration.writeConcernMax(), e => { - expect(e).to.not.exist; - - total = total - 1; - if (total === 0) callback(); - }); - } - }; - - insert(() => { - const cursor = collection.find(); - this.defer(() => cursor.close()); - - cursor.count((err, count) => { - expect(err).to.not.exist; - test.equal(10, count); - }); - - const cursor2 = collection.find(); - this.defer(() => cursor2.close()); - - 
cursor2.toArray((err, items) => { - expect(err).to.not.exist; - test.equal(10, items.length); - - collection - .find() - .skip(2) - .toArray((err, items2) => { - expect(err).to.not.exist; - test.equal(8, items2.length); - - // Check that we have the same elements - var numberEqual = 0; - var sliced = items.slice(2, 10); - - for (var i = 0; i < sliced.length; i++) { - if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; - } - - test.equal(8, numberEqual); - done(); - }); - }); - }); - }); - }); - } - }); - - it('shouldCorrectlyReturnErrorsOnIllegalSkipValues', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_skip_exceptions', (err, collection) => { - expect(err).to.not.exist; - collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - try { - collection.find().skip('not-an-integer'); - } catch (err) { - test.equal('Operation "skip" requires an integer', err.message); - } - - const cursor = collection.find(); - cursor.next(err => { - expect(err).to.not.exist; - - // NOTE: who cares what you set when closed, if not initialized - // expect(() => { - // cursor.skip(1); - // }).to.throw(/not extensible/); - - const cursor2 = collection.find(); - cursor2.close(err => { - expect(err).to.not.exist; - - // NOTE: who cares what you set when closed, if not initialized - // expect(() => { - // cursor2.skip(1); - // }).to.throw(/not extensible/); - - done(); - }); - }); - }); - }); - }); - } - }); - - it('shouldReturnErrorsOnIllegalBatchSizes', { - // Add a tag that our runner can trigger on - // in this case we are setting 
that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_batchSize_exceptions', (err, collection) => { - expect(err).to.not.exist; - collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - let cursor = collection.find(); - try { - cursor.batchSize('not-an-integer'); - test.ok(false); - } catch (err) { - test.equal('Operation "batchSize" requires an integer', err.message); - } - - cursor = collection.find(); - cursor.next(err => { - expect(err).to.not.exist; - - cursor.next(err => { - expect(err).to.not.exist; - - // NOTE: who cares what you set when closed, if not initialized - // expect(() => { - // cursor.batchSize(1); - // }).to.throw(/not extensible/); - - const cursor2 = collection.find(); - cursor2.close(err => { - expect(err).to.not.exist; - - // NOTE: who cares what you set when closed, if not initialized - // expect(() => { - // cursor2.batchSize(1); - // }).to.throw(/not extensible/); - - done(); - }); - }); - }); - }); - }); - }); - } - }); - - it('shouldCorrectlyHandleBatchSize', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_multiple_batch_size', (err, collection) => { - expect(err).to.not.exist; - - //test with the last batch that is a multiple of batchSize - var records = 4; - var batchSize = 2; 
- var docs = []; - for (var i = 0; i < records; i++) { - docs.push({ a: i }); - } - - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - const cursor = collection.find({}, { batchSize: batchSize }); - - //1st - cursor.next((err, items) => { - expect(err).to.not.exist; - test.equal(1, cursor.bufferedCount()); - test.ok(items != null); - - //2nd - cursor.next((err, items) => { - expect(err).to.not.exist; - test.equal(0, cursor.bufferedCount()); - test.ok(items != null); - - //3rd - cursor.next((err, items) => { - expect(err).to.not.exist; - test.equal(1, cursor.bufferedCount()); - test.ok(items != null); - - //4th - cursor.next((err, items) => { - expect(err).to.not.exist; - test.equal(0, cursor.bufferedCount()); - test.ok(items != null); - - //No more - cursor.next((err, items) => { - expect(err).to.not.exist; - test.ok(items == null); - test.ok(cursor.closed); - done(); - }); - }); - }); - }); - }); - }); - }); - }); - } - }); - - it('shouldHandleWhenLimitBiggerThanBatchSize', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_limit_greater_than_batch_size', (err, collection) => { - expect(err).to.not.exist; - - var limit = 4; - var records = 10; - var batchSize = 3; - var docs = []; - for (var i = 0; i < records; i++) { - docs.push({ a: i }); - } - - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - var cursor = collection.find({}, { batchSize: batchSize, limit: limit }); - //1st - cursor.next(err => { - expect(err).to.not.exist; - test.equal(2, cursor.bufferedCount()); - - 
//2nd - cursor.next(err => { - expect(err).to.not.exist; - test.equal(1, cursor.bufferedCount()); - - //3rd - cursor.next(err => { - expect(err).to.not.exist; - test.equal(0, cursor.bufferedCount()); - - //4th - cursor.next(err => { - expect(err).to.not.exist; - - //No more - cursor.next((err, items) => { - expect(err).to.not.exist; - test.ok(items == null); - test.ok(cursor.closed); - done(); - }); - }); - }); - }); - }); - }); - }); - }); - } - }); - - it('shouldHandleLimitLessThanBatchSize', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_limit_less_than_batch_size', (err, collection) => { - expect(err).to.not.exist; - - var limit = 2; - var records = 10; - var batchSize = 4; - var docs = []; - for (var i = 0; i < records; i++) { - docs.push({ a: i }); - } - - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - var cursor = collection.find({}, { batchSize: batchSize, limit: limit }); - //1st - cursor.next(err => { - expect(err).to.not.exist; - test.equal(1, cursor.bufferedCount()); - - //2nd - cursor.next(err => { - expect(err).to.not.exist; - test.equal(0, cursor.bufferedCount()); - - //No more - cursor.next((err, items) => { - expect(err).to.not.exist; - test.ok(items == null); - test.ok(cursor.closed); - done(); - }); - }); - }); - }); - }); - }); - } - }); - - it('shouldHandleSkipLimitChaining', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: 
function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - var collection = db.collection('shouldHandleSkipLimitChaining'); - - function insert(callback) { - var total = 10; - - for (var i = 0; i < 10; i++) { - collection.insert({ x: i }, configuration.writeConcernMax(), e => { - expect(e).to.not.exist; - total = total - 1; - if (total === 0) callback(); - }); - } - } - - function finished() { - collection.find().toArray((err, items) => { - expect(err).to.not.exist; - test.equal(10, items.length); - - collection - .find() - .limit(5) - .skip(3) - .toArray(function (err, items2) { - expect(err).to.not.exist; - test.equal(5, items2.length); - - // Check that we have the same elements - var numberEqual = 0; - var sliced = items.slice(3, 8); - - for (var i = 0; i < sliced.length; i++) { - if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; - } - test.equal(5, numberEqual); - done(); - }); - }); - } - - insert(function () { - finished(); - }); - }); - } - }); - - it('shouldCorrectlyHandleLimitSkipChainingInline', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_limit_skip_chaining_inline', (err, collection) => { - expect(err).to.not.exist; - - function insert(callback) { - var total = 10; - - for (var i = 0; i < 10; i++) { - collection.insert({ x: i }, configuration.writeConcernMax(), e => { - expect(e).to.not.exist; - total = total - 1; - if (total === 0) callback(); - }); - } - } - - function finished() { - 
collection.find().toArray((err, items) => { - expect(err).to.not.exist; - test.equal(10, items.length); - - collection - .find() - .limit(5) - .skip(3) - .toArray(function (err, items2) { - expect(err).to.not.exist; - test.equal(5, items2.length); - - // Check that we have the same elements - var numberEqual = 0; - var sliced = items.slice(3, 8); - - for (var i = 0; i < sliced.length; i++) { - if (sliced[i].x === items2[i].x) numberEqual = numberEqual + 1; - } - test.equal(5, numberEqual); - done(); - }); - }); - } - - insert(function () { - finished(); - }); - }); - }); - } - }); - - it('shouldCloseCursorNoQuerySent', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_close_no_query_sent', (err, collection) => { - expect(err).to.not.exist; - - const cursor = collection.find(); - cursor.close(err => { - expect(err).to.not.exist; - test.equal(true, cursor.closed); - done(); - }); - }); - }); - } - }); - - it('shouldCorrectlyRefillViaGetMoreCommand', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var COUNT = 1000; - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_refill_via_get_more', (err, collection) => { - expect(err).to.not.exist; - - function insert(callback) { - var docs = []; - - for (var i = 0; i < COUNT; i++) 
{ - docs.push({ a: i }); - } - - collection.insertMany(docs, configuration.writeConcernMax(), callback); - } - - function finished() { - collection.count((err, count) => { - expect(err).to.not.exist; - test.equal(COUNT, count); - }); - - var total = 0; - collection.find({}, {}).forEach( - item => { - total = total + item.a; - }, - err => { - expect(err).to.not.exist; - test.equal(499500, total); - - collection.count((err, count) => { - expect(err).to.not.exist; - test.equal(COUNT, count); - }); - - collection.count((err, count) => { - expect(err).to.not.exist; - test.equal(COUNT, count); - - var total2 = 0; - collection.find().forEach( - item => { - total2 = total2 + item.a; - }, - err => { - expect(err).to.not.exist; - test.equal(499500, total2); - collection.count((err, count) => { - expect(err).to.not.exist; - test.equal(COUNT, count); - test.equal(total, total2); - done(); - }); - } - ); - }); - } - ); - } - - insert(function () { - finished(); - }); - }); - }); - } - }); - - it('shouldCorrectlyRefillViaGetMoreAlternativeCollection', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_refill_via_get_more_alt_coll', (err, collection) => { - expect(err).to.not.exist; - var COUNT = 1000; - - function insert(callback) { - var docs = []; - - for (var i = 0; i < COUNT; i++) { - docs.push({ a: i }); - } - - collection.insertMany(docs, configuration.writeConcernMax(), callback); - } - - function finished() { - collection.count((err, count) => { - expect(err).to.not.exist; - test.equal(1000, count); - }); - - var total = 0; - collection.find().forEach( - doc => { - total = 
total + doc.a; - }, - err => { - expect(err).to.not.exist; - test.equal(499500, total); - - collection.count((err, count) => { - expect(err).to.not.exist; - test.equal(1000, count); - }); - - collection.count((err, count) => { - expect(err).to.not.exist; - test.equal(1000, count); - - var total2 = 0; - collection.find().forEach( - doc => { - total2 = total2 + doc.a; - }, - err => { - expect(err).to.not.exist; - expect(total2).to.equal(499500); - - collection.count((err, count) => { - expect(err).to.not.exist; - expect(count).to.equal(1000); - expect(total2).to.equal(total); - done(); - }); - } - ); - }); - } - ); - } - - insert(function () { - finished(); - }); - }); - }); - } - }); - - it('shouldCloseCursorAfterQueryHasBeenSent', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_close_after_query_sent', (err, collection) => { - expect(err).to.not.exist; - - collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - const cursor = collection.find({ a: 1 }); - cursor.next(err => { - expect(err).to.not.exist; - - cursor.close(err => { - expect(err).to.not.exist; - test.equal(true, cursor.closed); - done(); - }); - }); - }); - }); - }); - } - }); - - it('shouldCorrectlyExecuteCursorCountWithFields', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - 
expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_count_with_fields', (err, collection) => { - expect(err).to.not.exist; - - collection.insertOne({ x: 1, a: 2 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - collection - .find({}) - .project({ a: 1 }) - .toArray((err, items) => { - expect(err).to.not.exist; - test.equal(1, items.length); - test.equal(2, items[0].a); - expect(items[0].x).to.not.exist; - done(); - }); - }); - }); - }); - } - }); - - it('shouldCorrectlyCountWithFieldsUsingExclude', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('test_count_with_fields_using_exclude', (err, collection) => { - expect(err).to.not.exist; - - collection.insertOne({ x: 1, a: 2 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - collection.find({}, { projection: { x: 0 } }).toArray((err, items) => { - expect(err).to.not.exist; - test.equal(1, items.length); - test.equal(2, items[0].a); - expect(items[0].x).to.not.exist; - done(); - }); - }); - }); - }); - } - }); - - it('Should correctly execute count on cursor', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - - for (var i = 0; i < 1000; i++) { - var d = new Date().getTime() + i * 1000; - docs[i] = { a: i, createdAt: new Date(d) }; - } - - const configuration = this.configuration; - 
client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('Should_correctly_execute_count_on_cursor_1', (err, collection) => { - expect(err).to.not.exist; - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - let total = 0; - // Create a cursor for the content - const cursor = collection.find({}); - this.defer(() => cursor.close()); - - cursor.count(err => { - expect(err).to.not.exist; - // Ensure each returns all documents - cursor.forEach( - () => { - total++; - }, - err => { - expect(err).to.not.exist; - cursor.count((err, c) => { - expect(err).to.not.exist; - expect(c).to.equal(1000); - expect(total).to.equal(1000); - done(); - }); - } - ); - }); - }); - }); - }); - } - }); - - it('does not auto destroy streams', function (done) { - const docs = []; - - for (var i = 0; i < 10; i++) { - docs.push({ a: i + 1 }); - } - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - - const db = client.db(configuration.db); - db.createCollection('does_not_autodestroy_streams', (err, collection) => { - expect(err).to.not.exist; - - collection.insertMany(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - const cursor = collection.find(); - const stream = cursor.stream(); - stream.on('close', () => { - expect.fail('extra close event must not be called'); - }); - stream.on('end', () => { - client.close(); - done(); - }); - stream.on('data', doc => { - expect(doc).to.exist; - }); - stream.resume(); - }); - }); - }); - }); - - it('should be able to stream documents', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - - for 
(var i = 0; i < 1000; i++) { - docs[i] = { a: i + 1 }; - } - - var count = 0; - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('Should_be_able_to_stream_documents', (err, collection) => { - expect(err).to.not.exist; - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - var paused = 0, - closed = 0, - resumed = 0, - i = 0; - - const cursor = collection.find(); - const stream = cursor.stream(); - - stream.on('data', function (doc) { - test.equal(true, !!doc); - test.equal(true, !!doc.a); - count = count + 1; - - if (paused > 0 && 0 === resumed) { - err = new Error('data emitted during pause'); - return testDone(); - } - - if (++i === 3) { - stream.pause(); - paused++; - - setTimeout(function () { - stream.resume(); - resumed++; - }, 20); - } - }); - - stream.once('error', function (er) { - err = er; - testDone(); - }); - - stream.once('end', function () { - closed++; - testDone(); - }); - - function testDone() { - expect(err).to.not.exist; - test.equal(i, docs.length); - test.equal(1, closed); - test.equal(1, paused); - test.equal(1, resumed); - test.strictEqual(cursor.closed, true); - done(); - } - }); - }); - }); - } - }); - - it('immediately destroying a stream prevents the query from executing', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var i = 0, - docs = [{ b: 2 }, { b: 3 }], - doneCalled = 0; - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection( - 
'immediately_destroying_a_stream_prevents_the_query_from_executing', - (err, collection) => { - expect(err).to.not.exist; - - // insert all docs - collection.insertMany(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - const cursor = collection.find(); - const stream = cursor.stream(); - - stream.on('data', function () { - i++; - }); - - cursor.once('close', testDone('close')); - stream.once('error', testDone('error')); - - stream.destroy(); - - function testDone() { - return err => { - ++doneCalled; - - if (doneCalled === 1) { - expect(err).to.not.exist; - test.strictEqual(0, i); - test.strictEqual(true, cursor.closed); - done(); - } - }; - } - }); - } - ); - }); - } - }); - - it('removes session when cloning an find cursor', async function () { - const collection = await client.db().collection('test'); - - const cursor = collection.find({}); - await cursor.next(); - - const clonedCursor = cursor.clone(); - - expect(cursor).to.have.property('session').not.to.be.null; - expect(clonedCursor).to.have.property('session').to.be.null; - }); - - it('removes session when cloning an aggregation cursor', async function () { - const collection = await client.db().collection('test'); - - const cursor = collection.aggregate([{ $match: {} }]); - await cursor.next(); - - const clonedCursor = cursor.clone(); - - expect(cursor).to.have.property('session').not.to.be.null; - expect(clonedCursor).to.have.property('session').to.be.null; - }); - - it('destroying a stream stops it', async function () { - const db = client.db(); - await db.dropCollection('destroying_a_stream_stops_it').catch(() => null); - const collection = await db.createCollection('destroying_a_stream_stops_it'); - - const docs = Array.from({ length: 10 }, (_, i) => ({ b: i + 1 })); - - await collection.insertMany(docs); - - const cursor = collection.find(); - const stream = cursor.stream(); - - expect(cursor).property('closed', false); - - const willClose = once(cursor, 'close'); - - 
const dataEvents = on(stream, 'data'); - - for (let i = 0; i < 5; i++) { - let { - value: [doc] - } = await dataEvents.next(); - expect(doc).property('b', i + 1); - } - - // After 5 successful data events, destroy stream - stream.destroy(); - - // We should get a close event on the stream and a close event on the cursor - // We should **not** get an 'error' or an 'end' event, - // the following will throw if either stream or cursor emitted an 'error' event - await Promise.race([ - willClose, - sleep(100).then(() => Promise.reject(new Error('close event never emitted'))) - ]); - }); - - // NOTE: skipped for use of topology manager - it.skip('cursor stream errors', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { requires: { topology: ['single'] } }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('cursor_stream_errors', (err, collection) => { - expect(err).to.not.exist; - - var docs = []; - for (var ii = 0; ii < 10; ++ii) docs.push({ b: ii + 1 }); - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - var finished = 0, - i = 0; - - const cursor = collection.find({}, { batchSize: 5 }); - const stream = cursor.stream(); - - stream.on('data', function () { - if (++i === 4) { - // Force restart - configuration.manager.stop(9); - } - }); - - stream.once('close', testDone('close')); - stream.once('error', testDone('error')); - - function testDone() { - return function () { - ++finished; - - if (finished === 2) { - setTimeout(function () { - test.equal(5, i); - test.equal(true, cursor.closed); - client.close(); - - configuration.manager.start().then(function () { - done(); - }); - }, 150); - } - }; - } - }); - }); - }); - } - 
}); - - it('cursor stream pipe', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('cursor_stream_pipe', (err, collection) => { - expect(err).to.not.exist; - - var docs = []; - 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').forEach(function (name) { - docs.push({ name: name }); - }); - - // insert all docs - collection.insertMany(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - const filename = path.join(os.tmpdir(), '_nodemongodbnative_stream_out.txt'); - const out = fs.createWriteStream(filename); - const stream = collection.find().stream().map(JSON.stringify); - - stream.pipe(out); - // Wait for output stream to close - out.on('close', testDone); - - function testDone(err) { - // Object.prototype.toString = toString; - test.strictEqual(undefined, err); - var contents = fs.readFileSync(filename, 'utf8'); - test.ok(/Aaden/.test(contents)); - test.ok(/Aaron/.test(contents)); - test.ok(/Adrian/.test(contents)); - test.ok(/Aditya/.test(contents)); - test.ok(/Bob/.test(contents)); - test.ok(/Joe/.test(contents)); - fs.unlinkSync(filename); - done(); - } - }); - }); - }); - } - }); - - it( - 'closes cursors when client is closed even if it has not been exhausted', - { requires: { topology: '!replicaset' } }, - async function () { - await client - .db() - .dropCollection('test_cleanup_tailable') - .catch(() => null); - - const collection = await client - .db() - .createCollection('test_cleanup_tailable', { capped: true, size: 1000, max: 3 }); - - // insert only 2 docs in capped coll of 3 - await collection.insertMany([{ a: 1 }, { a: 1 }]); - - const 
cursor = collection.find({}, { tailable: true, awaitData: true, maxAwaitTimeMS: 2000 }); - - await cursor.next(); - await cursor.next(); - - const nextCommand = once(client, 'commandStarted'); - // will block for maxAwaitTimeMS (except we are closing the client) - const rejectedEarlyBecauseClientClosed = cursor.next().catch(error => error); - - for ( - let [{ commandName }] = await nextCommand; - commandName !== 'getMore'; - [{ commandName }] = await once(client, 'commandStarted') - ); - - await client.close(); - expect(cursor).to.have.property('closed', true); - - const error = await rejectedEarlyBecauseClientClosed; - expect(error).to.be.instanceOf(MongoClientClosedError); - } - ); - - it('shouldAwaitDataWithDocumentsAvailable', function (done) { - // www.mongodb.com/docs/display/DOCS/Tailable+Cursors - - const configuration = this.configuration; - const client = configuration.newClient({ maxPoolSize: 1 }); - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - const options = { capped: true, size: 8 }; - db.createCollection('should_await_data_no_docs', options, (err, collection) => { - expect(err).to.not.exist; - - // Create cursor with awaitData, and timeout after the period specified - const cursor = collection.find({}, { tailable: true, awaitData: true }); - this.defer(() => cursor.close()); - - cursor.forEach( - () => {}, - err => { - expect(err).to.not.exist; - done(); - } - ); - }); - }); - }); - - context('awaiting data core tailable cursor test', () => { - let client; - let cursor; - - beforeEach(async function () { - client = await this.configuration.newClient().connect(); - }); - - afterEach(async () => { - if (cursor) await cursor.close(); - await client.close(); - }); - - it( - 'should block waiting for new data to arrive when the cursor reaches the end of the capped collection', - { - metadata: { requires: { mongodb: '>=3.2' } }, - async test() { - const db = 
client.db('cursor_tailable'); - - try { - await db.collection('cursor_tailable').drop(); - // eslint-disable-next-line no-empty - } catch {} - - const collection = await db.createCollection('cursor_tailable', { - capped: true, - size: 10000 - }); - - const res = await collection.insertOne({ a: 1 }); - expect(res).property('insertedId').to.exist; - - cursor = collection.find({}, { batchSize: 2, tailable: true, awaitData: true }); - const doc0 = await cursor.next(); - expect(doc0).to.have.property('a', 1); - - // After 300ms make an insert - const later = runLater(async () => { - const res = await collection.insertOne({ b: 2 }); - expect(res).property('insertedId').to.exist; - }, 300); - - const start = performance.now(); - const doc1 = await cursor.next(); - expect(doc1).to.have.property('b', 2); - const end = performance.now(); - - await later; // make sure this finished, without a failure - - // We should see here that cursor.next blocked for at least 300ms - expect(end - start).to.be.at.least(290); - } - } - ); - }); - - // NOTE: should we continue to let users explicitly `kill` a cursor? 
- it.skip('Should correctly retry tailable cursor connection', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - // www.mongodb.com/docs/display/DOCS/Tailable+Cursors - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - const options = { capped: true, size: 8 }; - db.createCollection('should_await_data', options, (err, collection) => { - expect(err).to.not.exist; - - collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Create cursor with awaitData, and timeout after the period specified - var cursor = collection.find({}, { tailable: true, awaitData: true }); - cursor.forEach( - () => cursor.kill(), - () => { - // kill cursor b/c cursor is tailable / awaitable - cursor.close(done); - } - ); - }); - }); - }); - } - }); - - it('shouldCorrectExecuteExplainHonoringLimit', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - docs[0] = { - _keywords: [ - 'compact', - 'ii2gd', - 'led', - '24-48v', - 'presse-etoupe', - 'bexbgl1d24483', - 'flash', - '48v', - 'eexd', - 'feu', - 'presse', - 'compris', - 'rouge', - 'etoupe', - 'iic', - 'ii2gdeexdiict5', - 'red', - 'aet' - ] - }; - docs[1] = { - _keywords: [ - 'reducteur', - '06212', - 'd20/16', - 'manch', - 'd20', - 'manchon', - 'ard', - 'sable', - 'irl', - 'red' - ] - }; - docs[2] = { - _keywords: [ - 'reducteur', - '06214', - 'manch', - 'd25/20', - 'd25', - 'manchon', - 'ard', - 'sable', - 'irl', - 'red' - ] - }; - docs[3] = { - _keywords: [ - 'bar', - 
'rac', - 'boite', - '6790178', - '50-240/4-35', - '240', - 'branch', - 'coulee', - 'ddc', - 'red', - 'ip2x' - ] - }; - docs[4] = { - _keywords: [ - 'bar', - 'ip2x', - 'boite', - '6790158', - 'ddi', - '240', - 'branch', - 'injectee', - '50-240/4-35?', - 'red' - ] - }; - docs[5] = { - _keywords: [ - 'bar', - 'ip2x', - 'boite', - '6790179', - 'coulee', - '240', - 'branch', - 'sdc', - '50-240/4-35?', - 'red', - 'rac' - ] - }; - docs[6] = { - _keywords: [ - 'bar', - 'ip2x', - 'boite', - '6790159', - '240', - 'branch', - 'injectee', - '50-240/4-35?', - 'sdi', - 'red' - ] - }; - docs[7] = { - _keywords: [ - '6000', - 'r-6000', - 'resin', - 'high', - '739680', - 'red', - 'performance', - 'brd', - 'with', - 'ribbon', - 'flanges' - ] - }; - docs[8] = { _keywords: ['804320', 'for', 'paint', 'roads', 'brd', 'red'] }; - docs[9] = { _keywords: ['38mm', 'padlock', 'safety', '813594', 'brd', 'red'] }; - docs[10] = { _keywords: ['114551', 'r6900', 'for', 'red', 'bmp71', 'brd', 'ribbon'] }; - docs[11] = { - _keywords: ['catena', 'diameter', '621482', 'rings', 'brd', 'legend', 'red', '2mm'] - }; - docs[12] = { - _keywords: ['catena', 'diameter', '621491', 'rings', '5mm', 'brd', 'legend', 'red'] - }; - docs[13] = { - _keywords: ['catena', 'diameter', '621499', 'rings', '3mm', 'brd', 'legend', 'red'] - }; - docs[14] = { - _keywords: ['catena', 'diameter', '621508', 'rings', '5mm', 'brd', 'legend', 'red'] - }; - docs[15] = { - _keywords: [ - 'insert', - 'for', - 'cable', - '3mm', - 'carrier', - '621540', - 'blank', - 'brd', - 'ademark', - 'red' - ] - }; - docs[16] = { - _keywords: [ - 'insert', - 'for', - 'cable', - '621544', - '3mm', - 'carrier', - 'brd', - 'ademark', - 'legend', - 'red' - ] - }; - docs[17] = { - _keywords: ['catena', 'diameter', '6mm', '621518', 'rings', 'brd', 'legend', 'red'] - }; - docs[18] = { - _keywords: ['catena', 'diameter', '621455', '8mm', 'rings', 'brd', 'legend', 'red'] - }; - docs[19] = { - _keywords: ['catena', 'diameter', '621464', 'rings', '5mm', 
'brd', 'legend', 'red'] - }; - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - // Insert all the docs - var collection = db.collection('shouldCorrectExecuteExplainHonoringLimit'); - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - collection.createIndex({ _keywords: 1 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - collection - .find({ _keywords: 'red' }) - .limit(10) - .toArray(function (err, result) { - expect(err).to.not.exist; - test.ok(result != null); - - collection - .find({ _keywords: 'red' }, {}) - .limit(10) - .explain(function (err, result) { - expect(err).to.not.exist; - test.ok(result != null); - - done(); - }); - }); - }); - }); - }); - } - }); - - it('shouldNotExplainWhenFalse', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var doc = { name: 'camera', _keywords: ['compact', 'ii2gd', 'led', 'red', 'aet'] }; - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - var collection = db.collection('shouldNotExplainWhenFalse'); - collection.insert(doc, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - collection - .find({ _keywords: 'red' }) - .limit(10) - .toArray(function (err, result) { - expect(err).to.not.exist; - - test.equal('camera', result[0].name); - done(); - }); - }); - }); - } - }); - - it('shouldFailToSetReadPreferenceOnCursor', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: 
{ topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - try { - db.collection('shouldFailToSetReadPreferenceOnCursor') - .find() - .withReadPreference('notsecondary'); - test.ok(false); - } catch (err) {} // eslint-disable-line - - db.collection('shouldFailToSetReadPreferenceOnCursor') - .find() - .withReadPreference('secondary'); - - done(); - }); - } - }); - - it('should allow setting the cursors readConcern through a builder', { - metadata: { requires: { mongodb: '>=3.2' } }, - test: function (done) { - const client = this.configuration.newClient({ monitorCommands: true }); - const events = []; - client.on('commandStarted', event => { - if (event.commandName === 'find') { - events.push(event); - } - }); - const db = client.db(this.configuration.db); - const cursor = db.collection('foo').find().withReadConcern('local'); - expect(cursor).property('readConcern').to.have.property('level').equal('local'); - - cursor.toArray(err => { - expect(err).to.not.exist; - - expect(events).to.have.length(1); - const findCommand = events[0]; - expect(findCommand).nested.property('command.readConcern').to.eql({ level: 'local' }); - client.close(done); - }); - } - }); - - it('shouldNotFailDueToStackOverflowEach', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('shouldNotFailDueToStackOverflowEach', (err, collection) => { - expect(err).to.not.exist; - - var docs = []; - var 
total = 0; - for (var i = 0; i < 30000; i++) docs.push({ a: i }); - var allDocs = []; - var left = 0; - - while (docs.length > 0) { - allDocs.push(docs.splice(0, 1000)); - } - // Get all batches we must insert - left = allDocs.length; - var totalI = 0; - - // Execute inserts - for (i = 0; i < left; i++) { - collection.insert(allDocs.shift(), configuration.writeConcernMax(), function (err, d) { - expect(err).to.not.exist; - - left = left - 1; - totalI = totalI + d.length; - - if (left === 0) { - collection.find({}).forEach( - () => { - total++; - }, - err => { - expect(err).to.not.exist; - expect(total).to.equal(30000); - done(); - } - ); - } - }); - } - }); - }); - } - }); - - it('should not fail due to stack overflow toArray', async function () { - const configuration = this.configuration; - const db = client.db(configuration.db); - const collection = await db.createCollection('shouldNotFailDueToStackOverflowToArray'); - - var docs = Array.from({ length: 30000 }, (_, i) => ({ a: i })); - var allDocs = []; - var left = 0; - - while (docs.length > 0) { - allDocs.push(docs.splice(0, 1000)); - } - // Get all batches we must insert - left = allDocs.length; - var totalI = 0; - var timeout = 0; - - // Execute inserts - for (let i = 0; i < left; i++) { - await sleep(timeout); - - const d = await collection.insert(allDocs.shift()); - left = left - 1; - totalI = totalI + d.length; - - if (left === 0) { - const items = await collection.find({}).toArray(); - expect(items).to.have.a.lengthOf(3000); - } - timeout = timeout + 100; - } - - await client.close(); - }); - - it('should correctly skip and limit', function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - - const db = client.db(configuration.db); - var collection = db.collection('shouldCorrectlySkipAndLimit'); - var docs = []; - for (var i = 0; i < 100; i++) docs.push({ a: i, OrderNumber: i }); - - collection.insert(docs, 
configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - collection - .find({}, { OrderNumber: 1 }) - .skip(10) - .limit(10) - .toArray((err, items) => { - expect(err).to.not.exist; - test.equal(10, items[0].OrderNumber); - - collection - .find({}, { OrderNumber: 1 }) - .skip(10) - .limit(10) - .count() - .then(count => { - test.equal(10, count); - client.close(done); - }); - }); - }); - }); - }); - - it('shouldFailToTailANormalCollection', function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - var collection = db.collection('shouldFailToTailANormalCollection'); - var docs = []; - for (var i = 0; i < 100; i++) docs.push({ a: i, OrderNumber: i }); - - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - const cursor = collection.find({}, { tailable: true }); - cursor.forEach( - () => {}, - err => { - test.ok(err instanceof Error); - test.ok(typeof err.code === 'number'); - - // Close cursor b/c we did not exhaust cursor - cursor.close(); - done(); - } - ); - }); - }); - }); - - it('shouldCorrectlyUseFindAndCursorCount', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - - // DOC_LINE var client = new MongoClient(new Server('localhost', 27017)); - // DOC_START - // Establish connection to db - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - - // Create a lot of documents to insert - var docs = []; - for (var i = 0; i < 100; i++) { - docs.push({ a: i }); - } - - // Create a collection - 
db.createCollection('test_close_function_on_cursor_2', (err, collection) => { - expect(err).to.not.exist; - - // Insert documents into collection - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - const cursor = collection.find({}); - - cursor.count((err, count) => { - expect(err).to.not.exist; - test.equal(100, count); - - done(); - }); - }); - }); - }); - // DOC_END - } - }); - - it('should correctly apply hint to count command for cursor', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { - topology: ['single', 'replicaset', 'sharded'], - mongodb: '>2.5.5' - } - }, - - test: function (done) { - const configuration = this.configuration; - - // DOC_LINE var client = new MongoClient(new Server('localhost', 27017)); - // DOC_START - // Establish connection to db - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - var col = db.collection('count_hint'); - - col.insert([{ i: 1 }, { i: 2 }], { writeConcern: { w: 1 } }, err => { - expect(err).to.not.exist; - - col.createIndex({ i: 1 }, err => { - expect(err).to.not.exist; - - col.find({ i: 1 }, { hint: '_id_' }).count((err, count) => { - expect(err).to.not.exist; - test.equal(1, count); - - col.find({}, { hint: '_id_' }).count((err, count) => { - expect(err).to.not.exist; - test.equal(2, count); - - col.find({ i: 1 }, { hint: 'BAD HINT' }).count(err => { - test.ok(err != null); - - col.createIndex({ x: 1 }, { sparse: true }, err => { - expect(err).to.not.exist; - - col.find({ i: 1 }, { hint: 'x_1' }).count((err, count) => { - expect(err).to.not.exist; - test.equal(0, count); - - col.find({}, { hint: 'i_1' }).count((err, count) => { - expect(err).to.not.exist; - test.equal(2, count); - - done(); - }); - }); - }); - }); - }); - }); - }); - }); - }); - // DOC_END - } - }); - - 
it('Terminate each after first document by returning false', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - - // Create a lot of documents to insert - var docs = []; - for (var i = 0; i < 100; i++) { - docs.push({ a: i }); - } - - // Create a collection - db.createCollection('terminate_each_returning_false', (err, collection) => { - expect(err).to.not.exist; - - // Insert documents into collection - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - var finished = false; - - collection.find({}).forEach( - doc => { - expect(doc).to.exist; - test.equal(finished, false); - finished = true; - - done(); - return false; - }, - err => { - expect(err).to.not.exist; - } - ); - }); - }); - }); - } - }); - - it('Should correctly handle maxTimeMS as part of findOne options', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - var donkey = { - color: 'brown' - }; - - db.collection('donkies').insertOne(donkey, function (err, result) { - expect(err).to.not.exist; - - var query = { _id: result.insertedId }; - var options = { maxTimeMS: 1000 }; - - db.collection('donkies').findOne(query, options, function (err, doc) { - expect(err).to.not.exist; - test.equal('brown', doc.color); - - done(); - 
}); - }); - }); - } - }); - - it('Should correctly handle batchSize of 2', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - const collectionName = 'should_correctly_handle_batchSize_2'; - db.collection(collectionName).insert([{ x: 1 }, { x: 2 }, { x: 3 }], err => { - expect(err).to.not.exist; - - const cursor = db.collection(collectionName).find({}, { batchSize: 2 }); - this.defer(() => cursor.close()); - - cursor.next(err => { - expect(err).to.not.exist; - - cursor.next(err => { - expect(err).to.not.exist; - - cursor.next(err => { - expect(err).to.not.exist; - done(); - }); - }); - }); - }); - }); - } - }); - - it('Should report database name and collection name', { - metadata: { requires: { topology: ['single'] } }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - const cursor = db.collection('myCollection').find({}); - test.equal('myCollection', cursor.namespace.collection); - test.equal('integration_tests', cursor.namespace.db); - - done(); - }); - } - }); - - it('Should correctly execute count on cursor with maxTimeMS', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - - for (var i = 0; i < 1000; i++) { - var d = new Date().getTime() + i * 1000; - docs[i] = { a: i, createdAt: new Date(d) }; - } - - const configuration = 
this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection( - 'Should_correctly_execute_count_on_cursor_2', - function (err, collection) { - expect(err).to.not.exist; - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Create a cursor for the content - var cursor = collection.find({}); - cursor.limit(100); - cursor.skip(10); - cursor.count({ maxTimeMS: 1000 }, err => { - expect(err).to.not.exist; - - // Create a cursor for the content - var cursor = collection.find({}); - cursor.limit(100); - cursor.skip(10); - cursor.maxTimeMS(100); - - cursor.count(err => { - expect(err).to.not.exist; - done(); - }); - }); - }); - } - ); - }); - } - }); - - it('Should correctly execute count on cursor with maxTimeMS set using legacy method', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - - for (var i = 0; i < 1000; i++) { - var d = new Date().getTime() + i * 1000; - docs[i] = { a: i, createdAt: new Date(d) }; - } - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection( - 'Should_correctly_execute_count_on_cursor_3', - function (err, collection) { - expect(err).to.not.exist; - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Create a cursor for the content - var cursor = collection.find({}, { maxTimeMS: 100 }); - cursor.toArray(err => { - expect(err).to.not.exist; - - done(); - }); - }); - } - ); - }); - } - }); - - it('Should correctly apply map to 
toArray', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - - for (var i = 0; i < 1000; i++) { - var d = new Date().getTime() + i * 1000; - docs[i] = { a: i, createdAt: new Date(d) }; - } - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - var collection = db.collection('map_toArray'); - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Create a cursor for the content - var cursor = collection - .find({}) - .map(function () { - return { a: 1 }; - }) - .batchSize(5) - .limit(10); - - cursor.toArray(function (err, docs) { - expect(err).to.not.exist; - test.equal(10, docs.length); - - // Ensure all docs where mapped - docs.forEach(doc => { - expect(doc).property('a').to.equal(1); - }); - - done(); - }); - }); - }); - } - }); - - it('Should correctly apply map to next', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - const docs = []; - for (var i = 0; i < 1000; i++) { - const d = new Date().getTime() + i * 1000; - docs[i] = { a: i, createdAt: new Date(d) }; - } - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - const collection = db.collection('map_next'); - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Create a cursor for the content - const cursor = 
collection - .find({}) - .map(function () { - return { a: 1 }; - }) - .batchSize(5) - .limit(10); - - this.defer(() => cursor.close()); - cursor.next((err, doc) => { - expect(err).to.not.exist; - test.equal(1, doc.a); - done(); - }); - }); - }); - } - }); - - it('Should correctly apply map to each', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - - for (var i = 0; i < 1000; i++) { - var d = new Date().getTime() + i * 1000; - docs[i] = { a: i, createdAt: new Date(d) }; - } - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - const collection = db.collection('map_each'); - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Create a cursor for the content - var cursor = collection - .find({}) - .map(function () { - return { a: 1 }; - }) - .batchSize(5) - .limit(10); - - cursor.forEach( - doc => { - test.equal(1, doc.a); - }, - err => { - expect(err).to.not.exist; - done(); - } - ); - }); - }); - } - }); - - it('Should correctly apply map to forEach', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - - for (var i = 0; i < 1000; i++) { - var d = new Date().getTime() + i * 1000; - docs[i] = { a: i, createdAt: new Date(d) }; - } - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - var collection = 
db.collection('map_forEach'); - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Create a cursor for the content - var cursor = collection - .find({}) - .map(function () { - return { a: 2 }; - }) - .map(function (x) { - return { a: x.a * x.a }; - }) - .batchSize(5) - .limit(10); - - cursor.forEach( - doc => { - test.equal(4, doc.a); - }, - err => { - expect(err).to.not.exist; - done(); - } - ); - }); - }); - } - }); - - it('Should correctly apply multiple uses of map and apply forEach', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - - for (var i = 0; i < 1000; i++) { - var d = new Date().getTime() + i * 1000; - docs[i] = { a: i, createdAt: new Date(d) }; - } - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - var collection = db.collection('map_mapmapforEach'); - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Create a cursor for the content - var cursor = collection - .find({}) - .map(function () { - return { a: 1 }; - }) - .batchSize(5) - .limit(10); - - cursor.forEach( - doc => { - expect(doc).property('a').to.equal(1); - }, - err => { - expect(err).to.not.exist; - done(); - } - ); - }); - }); - } - }); - - it('Should correctly apply skip and limit to large set of documents', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { requires: { topology: ['single', 'replicaset'] } }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => 
{ - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - var collection = db.collection('cursor_limit_skip_correctly'); - - // Insert x number of docs - var ordered = collection.initializeUnorderedBulkOp(); - - for (var i = 0; i < 6000; i++) { - ordered.insert({ a: i }); - } - - ordered.execute({ writeConcern: { w: 1 } }, err => { - expect(err).to.not.exist; - - // Let's attempt to skip and limit - collection - .find({}) - .limit(2016) - .skip(2016) - .toArray(function (err, docs) { - expect(err).to.not.exist; - test.equal(2016, docs.length); - - done(); - }); - }); - }); - } - }); - - it('should tail cursor using maxAwaitTimeMS for 3.2 or higher', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { requires: { topology: ['single'], mongodb: '<7.0.0' } }, - - test: function (done) { - const configuration = this.configuration; - const client = configuration.newClient(); - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - var options = { capped: true, size: 8 }; - db.createCollection( - 'should_await_data_max_awaittime_ms', - options, - function (err, collection) { - expect(err).to.not.exist; - - collection.insert({ a: 1 }, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Create cursor with awaitData, and timeout after the period specified - var cursor = collection - .find({}) - .addCursorFlag('tailable', true) - .addCursorFlag('awaitData', true) - .maxAwaitTimeMS(500); - - const s = new Date(); - cursor.forEach( - () => { - setTimeout(() => cursor.close(), 300); - }, - () => { - test.ok(new Date().getTime() - s.getTime() >= 500); - done(); - } - ); - }); - } - ); - }); - } - }); - - it('Should not emit any events after close event emitted due to cursor killed', { - // Add a tag that our runner can 
trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { requires: { topology: ['single', 'replicaset'] } }, - - test: function (done) { - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - var collection = db.collection('cursor_limit_skip_correctly'); - - // Insert x number of docs - var ordered = collection.initializeUnorderedBulkOp(); - - for (var i = 0; i < 100; i++) { - ordered.insert({ a: i }); - } - - ordered.execute({ writeConcern: { w: 1 } }, err => { - expect(err).to.not.exist; - - // Let's attempt to skip and limit - var cursor = collection.find({}).batchSize(10); - const stream = cursor.stream(); - stream.on('data', function () { - stream.destroy(); - }); - - cursor.on('close', function () { - done(); - }); - }); - }); - } - }); - - it('shouldCorrectlyExecuteEnsureIndexWithNoCallback', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - - for (var i = 0; i < 1; i++) { - var d = new Date().getTime() + i * 1000; - docs[i] = { createdAt: new Date(d) }; - } - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection( - 'shouldCorrectlyExecuteEnsureIndexWithNoCallback', - function (err, collection) { - expect(err).to.not.exist; - - // ensure index of createdAt index - collection.createIndex({ createdAt: 1 }, err => { - expect(err).to.not.exist; - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Find with sort - collection - .find() - 
.sort(['createdAt', 'asc']) - .toArray((err, items) => { - expect(err).to.not.exist; - - test.equal(1, items.length); - done(); - }); - }); - }); - } - ); - }); - } - }); - - it('Should correctly execute count on cursor with limit and skip', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - - for (var i = 0; i < 50; i++) { - var d = new Date().getTime() + i * 1000; - docs[i] = { a: i, createdAt: new Date(d) }; - } - - const configuration = this.configuration; - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('negative_batch_size_and_limit_set', (err, collection) => { - expect(err).to.not.exist; - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Create a cursor for the content - var cursor = collection.find({}); - cursor - .limit(100) - .skip(0) - .count(function (err, c) { - expect(err).to.not.exist; - test.equal(50, c); - - var cursor = collection.find({}); - cursor - .limit(100) - .skip(0) - .toArray(err => { - expect(err).to.not.exist; - test.equal(50, c); - - done(); - }); - }); - }); - }); - }); - } - }); - - it('Should correctly handle negative batchSize and set the limit', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var docs = []; - const configuration = this.configuration; - - for (var i = 0; i < 50; i++) { - var d = new Date().getTime() + i * 1000; - docs[i] = { a: i, createdAt: new Date(d) }; - } - - client.connect((err, client) => { - expect(err).to.not.exist; - 
this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection( - 'Should_correctly_execute_count_on_cursor_1_', - function (err, collection) { - expect(err).to.not.exist; - - // insert all docs - collection.insert(docs, configuration.writeConcernMax(), err => { - expect(err).to.not.exist; - - // Create a cursor for the content - var cursor = collection.find({}); - cursor.batchSize(-10).next(err => { - expect(err).to.not.exist; - test.ok(cursor.id.equals(BSON.Long.ZERO)); - - done(); - }); - }); - } - ); - }); - } - }); - - it('Correctly decorate the cursor count command with skip, limit, hint, readConcern', { - // Add a tag that our runner can trigger on - // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var started = []; - const configuration = this.configuration; - const client = configuration.newClient(configuration.writeConcernMax(), { - maxPoolSize: 1, - monitorCommands: true - }); - client.on('commandStarted', function (event) { - if (event.commandName === 'count') started.push(event); - }); - - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.collection('cursor_count_test', { readConcern: { level: 'local' } }) - .find({ project: '123' }) - .limit(5) - .skip(5) - .hint({ project: 1 }) - .count(err => { - expect(err).to.not.exist; - test.equal(1, started.length); - if (started[0].command.readConcern) - test.deepEqual({ level: 'local' }, started[0].command.readConcern); - test.deepEqual({ project: 1 }, started[0].command.hint); - test.equal(5, started[0].command.skip); - test.equal(5, started[0].command.limit); - - done(); - }); - }); - } - }); - - it.skip('Correctly decorate the collection count command with skip, limit, hint, readConcern', { - // Add a tag that our runner can trigger on 
- // in this case we are setting that node needs to be higher than 0.10.X to run - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var started = []; - - const configuration = this.configuration; - client.on('commandStarted', function (event) { - if (event.commandName === 'count') started.push(event); - }); - - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.collection('cursor_count_test1', { readConcern: { level: 'local' } }).count( - { - project: '123' - }, - { - readConcern: { level: 'local' }, - limit: 5, - skip: 5, - hint: { project: 1 } - }, - err => { - expect(err).to.not.exist; - test.equal(1, started.length); - if (started[0].command.readConcern) - test.deepEqual({ level: 'local' }, started[0].command.readConcern); - test.deepEqual({ project: 1 }, started[0].command.hint); - test.equal(5, started[0].command.skip); - test.equal(5, started[0].command.limit); - - done(); - } - ); - }); - } - }); - - // NOTE: should we allow users to explicitly `kill` a cursor anymore? 
- it.skip('Should properly kill a cursor', { - metadata: { - requires: { - topology: ['single', 'replicaset', 'sharded'], - mongodb: '>=3.2.0' - } - }, - - test: function () { - // Load up the documents - const docs = []; - for (let i = 0; i < 1000; i += 1) { - docs.push({ - a: i - }); - } - - const configuration = this.configuration; - - let cleanup = () => {}; - let caughtError = undefined; - - return ( - client - .connect() - .then(client => { - this.defer(() => client.close()); - const db = client.db(configuration.db); - const collection = db.collection('cursorkilltest1'); - - // Insert 1000 documents - return collection.insert(docs).then(() => { - // Generate cursor for find operation - const cursor = collection.find({}); - this.defer(() => cursor.close()); - - // Iterate cursor past first element - return cursor - .next() - .then(() => cursor.next()) - .then(() => { - // Confirm that cursorId is non-zero - const longId = cursor.id; - expect(longId).to.be.an('object'); - expect(Object.getPrototypeOf(longId)).to.haveOwnProperty('_bsontype', 'Long'); - const id = longId.toNumber(); - - expect(id).to.not.equal(0); - - // Kill cursor - return new Promise((resolve, reject) => - cursor.kill((err, r) => (err ? reject(err) : resolve(r))) - ).then(response => { - // sharded clusters will return a long, single return integers - if ( - response && - response.cursorsKilled && - Array.isArray(response.cursorsKilled) - ) { - response.cursorsKilled = response.cursorsKilled.map(id => - typeof id === 'number' ? BSON.Long.fromNumber(id) : id - ); - } - - expect(response.ok).to.equal(1); - expect(response.cursorsKilled[0].equals(longId)).to.be.ok; - }); - }); - }); - }) - - // Clean up. 
Make sure that even in case of error, we still always clean up connection - .catch(e => (caughtError = e)) - .then(cleanup) - .then(() => { - if (caughtError) { - throw caughtError; - } - }) - ); - } - }); - - it('should return implicit session to pool when client-side cursor exhausts results on initial query', async function () { - const configuration = this.configuration; - const client = configuration.newClient(); - - await client.connect(); - const db = client.db(configuration.db); - const collection = db.collection('cursor_session_tests'); - - await collection.insertMany([{ a: 1, b: 2 }]); - const cursor = collection.find({}); - - await cursor.next(); // implicit close, cursor is exhausted - expect(client.s.activeSessions.size).to.equal(0); - await cursor.close(); - await client.close(); - }); - - it('should return implicit session to pool when client-side cursor exhausts results after a getMore', async function () { - const db = client.db(this.configuration.db); - const collection = db.collection('cursor_session_tests2'); - - const docs = [ - { a: 1, b: 2 }, - { a: 3, b: 4 }, - { a: 5, b: 6 }, - { a: 7, b: 8 }, - { a: 9, b: 10 } - ]; - - await collection.insertMany(docs); - - const cursor = await collection.find({}, { batchSize: 3 }); - for (let i = 0; i < 3; ++i) { - await cursor.next(); - expect(client.s.activeSessions.size).to.equal(1); - } - - await cursor.next(); - expect(client.s.activeSessions.size, 'session not checked in after cursor exhausted').to.equal( - 0 - ); - - await cursor.close(); - }); - - describe('#clone', function () { - let client; - let db; - let collection; - - beforeEach(function () { - client = this.configuration.newClient({ w: 1 }); - - return client.connect().then(client => { - db = client.db(this.configuration.db); - collection = db.collection('test_coll'); - }); - }); - - afterEach(function () { - return client.close(); - }); - - context('when executing on a find cursor', function () { - it('removes the existing session from the 
cloned cursor', async function () { - const docs = [{ name: 'test1' }, { name: 'test2' }]; - await collection.insertMany(docs); - - const cursor = collection.find({}, { batchSize: 1 }); - try { - const doc = await cursor.next(); - expect(doc).to.exist; - - const clonedCursor = cursor.clone(); - expect(clonedCursor.session).to.be.null; - } finally { - await cursor.close(); - } - }); - }); - - context('when executing on an aggregation cursor', function () { - it('removes the existing session from the cloned cursor', async function () { - const docs = [{ name: 'test1' }, { name: 'test2' }]; - await collection.insertMany(docs); - - const cursor = collection.aggregate([{ $match: {} }], { batchSize: 1 }); - try { - const doc = await cursor.next(); - expect(doc).to.exist; - - const clonedCursor = cursor.clone(); - expect(clonedCursor.session).to.be.null; - } finally { - await cursor.close(); - } - }); - }); - }); - - describe('Cursor forEach Error propagation', function () { - let configuration; - let client; - let cursor; - let collection; - - beforeEach(async function () { - configuration = this.configuration; - client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); - await client.connect().catch(() => { - expect.fail('Failed to connect to client'); - }); - collection = client.db(configuration.db).collection('cursor_session_tests2'); - }); - - afterEach(async function () { - await cursor.close(); - await client.close(); - }); - - // NODE-2035 - it('should propagate error when exceptions are thrown from an awaited forEach call', async function () { - const docs = [{ unique_key_2035: 1 }, { unique_key_2035: 2 }, { unique_key_2035: 3 }]; - await collection.insertMany(docs).catch(() => { - expect.fail('Failed to insert documents'); - }); - cursor = collection.find({ - unique_key_2035: { - $exists: true - } - }); - await cursor - .forEach(() => { - throw new Error('FAILURE IN FOREACH CALL'); - }) - .then(() => { - expect.fail('Error in forEach call not caught'); - 
}) - .catch(err => { - expect(err.message).to.deep.equal('FAILURE IN FOREACH CALL'); - }); - }); - }); - - it('should return a promise when no callback supplied to forEach method', function () { - const configuration = this.configuration; - const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); - - return client.connect().then(() => { - this.defer(() => client.close()); - - const db = client.db(configuration.db); - const collection = db.collection('cursor_session_tests2'); - const cursor = collection.find(); - this.defer(() => cursor.close()); - - const promise = cursor.forEach(() => {}); - expect(promise).to.exist.and.to.be.an.instanceof(Promise); - return promise; - }); - }); - - it('should return false when exhausted and hasNext called more than once', function (done) { - const configuration = this.configuration; - const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); - - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - db.createCollection('cursor_hasNext_test').then(() => { - const cursor = db.collection('cursor_hasNext_test').find(); - this.defer(() => cursor.close()); - - cursor - .hasNext() - .then(val1 => { - expect(val1).to.equal(false); - return cursor.hasNext(); - }) - .then(val2 => { - expect(val2).to.equal(false); - done(); - }); - }); - }); - }); - - const testTransformStream = (config, _done) => { - const client = config.client; - const configuration = config.configuration; - const collectionName = config.collectionName; - const transformFunc = config.transformFunc; - const expectedSet = config.expectedSet; - - let cursor; - const done = err => cursor.close(err2 => client.close(err3 => _done(err || err2 || err3))); - - client.connect((err, client) => { - expect(err).to.not.exist; - - const db = client.db(configuration.db); - let collection; - const docs = [ - { _id: 0, a: { b: 1, c: 0 } }, - { _id: 1, a: { b: 1, c: 0 } }, - { 
_id: 2, a: { b: 1, c: 0 } } - ]; - const resultSet = new Set(); - Promise.resolve() - .then(() => db.createCollection(collectionName)) - .then(() => (collection = db.collection(collectionName))) - .then(() => collection.insertMany(docs)) - .then(() => { - cursor = collection.find(); - return cursor.stream().map(transformFunc ?? (doc => doc)); - }) - .then(stream => { - stream.on('data', function (doc) { - resultSet.add(doc); - }); - - stream.once('end', function () { - expect(resultSet).to.deep.equal(expectedSet); - done(); - }); - - stream.once('error', e => { - done(e); - }); - }) - .catch(e => done(e)); - }); - }; - - it('stream should apply the supplied transformation function to each document in the stream', function (done) { - const configuration = this.configuration; - const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); - const expectedDocs = [ - { _id: 0, b: 1, c: 0 }, - { _id: 1, b: 1, c: 0 }, - { _id: 2, b: 1, c: 0 } - ]; - const config = { - client: client, - configuration: configuration, - collectionName: 'stream-test-transform', - transformFunc: doc => ({ _id: doc._id, b: doc.a.b, c: doc.a.c }), - expectedSet: new Set(expectedDocs) - }; - - testTransformStream(config, done); - }); - - it('stream should return a stream of unmodified docs if no transform function applied', function (done) { - const configuration = this.configuration; - const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); - const expectedDocs = [ - { _id: 0, a: { b: 1, c: 0 } }, - { _id: 1, a: { b: 1, c: 0 } }, - { _id: 2, a: { b: 1, c: 0 } } - ]; - const config = { - client: client, - configuration: configuration, - collectionName: 'transformStream-test-notransform', - transformFunc: null, - expectedSet: new Set(expectedDocs) - }; - - testTransformStream(config, done); - }); - - it.skip('should apply parent read preference to count command', function (done) { - // NOTE: this test is skipped because mongo orchestration does not test sharded clusters - // 
with secondaries. This behavior should be unit tested - - const configuration = this.configuration; - const client = configuration.newClient( - { w: 1, readPreference: ReadPreference.SECONDARY }, - { maxPoolSize: 1, connectWithNoPrimary: true } - ); - - client.connect((err, client) => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const db = client.db(configuration.db); - let collection, cursor, spy; - const close = e => cursor.close(() => client.close(() => done(e))); - - Promise.resolve() - .then(() => new Promise(resolve => setTimeout(() => resolve(), 500))) - .then(() => db.createCollection('test_count_readPreference')) - .then(() => (collection = db.collection('test_count_readPreference'))) - .then(() => collection.find()) - .then(_cursor => (cursor = _cursor)) - .then(() => (spy = sinon.spy(cursor.topology, 'command'))) - .then(() => cursor.count()) - .then(() => - expect(spy.firstCall.args[2]) - .to.have.nested.property('readPreference.mode') - .that.equals('secondary') - ) - .then(() => close()) - .catch(e => close(e)); - }); - }); - - it('should not consume first document on hasNext when streaming', function (done) { - const configuration = this.configuration; - const client = configuration.newClient({ w: 1 }, { maxPoolSize: 1 }); - - client.connect(err => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const collection = client.db().collection('documents'); - collection.drop(() => { - const docs = [{ a: 1 }, { a: 2 }, { a: 3 }]; - collection.insertMany(docs, err => { - expect(err).to.not.exist; - - const cursor = collection.find({}, { sort: { a: 1 } }); - cursor.hasNext((err, hasNext) => { - expect(err).to.not.exist; - expect(hasNext).to.be.true; - - const collected = []; - const stream = new Writable({ - objectMode: true, - write: (chunk, encoding, next) => { - collected.push(chunk); - next(undefined, chunk); - } - }); - - const cursorStream = cursor.stream(); - - cursorStream.on('end', () => { - 
expect(collected).to.have.length(3); - expect(collected).to.eql(docs); - done(); - }); - - cursorStream.pipe(stream); - }); - }); - }); - }); - }); - - describe('transforms', function () { - it('should correctly apply map transform to cursor as readable stream', function (done) { - const configuration = this.configuration; - const client = configuration.newClient(); - client.connect(err => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const docs = 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').map(x => ({ name: x })); - const coll = client.db(configuration.db).collection('cursor_stream_mapping'); - coll.insertMany(docs, err => { - expect(err).to.not.exist; - - const bag = []; - const stream = coll - .find() - .project({ _id: 0, name: 1 }) - .map(doc => ({ mapped: doc })) - .stream() - .on('data', doc => bag.push(doc)); - - stream.on('error', done).on('end', () => { - expect(bag.map(x => x.mapped)).to.eql(docs.map(x => ({ name: x.name }))); - done(); - }); - }); - }); - }); - - it('should correctly apply map transform when converting cursor to array', function (done) { - const configuration = this.configuration; - const client = configuration.newClient(); - client.connect(err => { - expect(err).to.not.exist; - this.defer(() => client.close()); - - const docs = 'Aaden Aaron Adrian Aditya Bob Joe'.split(' ').map(x => ({ name: x })); - const coll = client.db(configuration.db).collection('cursor_toArray_mapping'); - coll.insertMany(docs, err => { - expect(err).to.not.exist; - - coll - .find() - .project({ _id: 0, name: 1 }) - .map(doc => ({ mapped: doc })) - .toArray((err, mappedDocs) => { - expect(err).to.not.exist; - expect(mappedDocs.map(x => x.mapped)).to.eql(docs.map(x => ({ name: x.name }))); - done(); - }); - }); - }); - }); - }); - - context('sort', function () { - const findSort = (input, output) => - function (done) { - const client = this.configuration.newClient({ monitorCommands: true }); - const events = []; - 
client.on('commandStarted', event => { - if (event.commandName === 'find') { - events.push(event); - } - }); - const db = client.db('test'); - const collection = db.collection('test_sort_dos'); - const cursor = collection.find({}, { sort: input }); - cursor.next(err => { - expect(err).to.not.exist; - expect(events[0].command.sort).to.be.instanceOf(Map); - expect(Array.from(events[0].command.sort)).to.deep.equal(Array.from(output)); - client.close(done); - }); - }; - - const cursorSort = (input, output) => - function (done) { - const client = this.configuration.newClient({ monitorCommands: true }); - const events = []; - client.on('commandStarted', event => { - if (event.commandName === 'find') { - events.push(event); - } - }); - const db = client.db('test'); - const collection = db.collection('test_sort_dos'); - const cursor = collection.find({}).sort(input); - cursor.next(err => { - expect(err).to.not.exist; - expect(events[0].command.sort).to.be.instanceOf(Map); - expect(Array.from(events[0].command.sort)).to.deep.equal(Array.from(output)); - client.close(done); - }); - }; - - it('should use find options object', findSort({ alpha: 1 }, new Map([['alpha', 1]]))); - it('should use find options string', findSort('alpha', new Map([['alpha', 1]]))); - it('should use find options shallow array', findSort(['alpha', 1], new Map([['alpha', 1]]))); - it('should use find options deep array', findSort([['alpha', 1]], new Map([['alpha', 1]]))); - - it('should use cursor.sort object', cursorSort({ alpha: 1 }, new Map([['alpha', 1]]))); - it('should use cursor.sort string', cursorSort('alpha', new Map([['alpha', 1]]))); - it('should use cursor.sort shallow array', cursorSort(['alpha', 1], new Map([['alpha', 1]]))); - it('should use cursor.sort deep array', cursorSort([['alpha', 1]], new Map([['alpha', 1]]))); - - it('formatSort - one key', () => { - // TODO (NODE-3236): These are unit tests for a standalone function and should be moved out of the cursor context file - 
expect(formatSort('alpha')).to.deep.equal(new Map([['alpha', 1]])); - expect(formatSort(['alpha'])).to.deep.equal(new Map([['alpha', 1]])); - expect(formatSort('alpha', 1)).to.deep.equal(new Map([['alpha', 1]])); - expect(formatSort('alpha', 'asc')).to.deep.equal(new Map([['alpha', 1]])); - expect(formatSort([['alpha', 'asc']])).to.deep.equal(new Map([['alpha', 1]])); - expect(formatSort('alpha', 'ascending')).to.deep.equal(new Map([['alpha', 1]])); - expect(formatSort({ alpha: 1 })).to.deep.equal(new Map([['alpha', 1]])); - expect(formatSort('beta')).to.deep.equal(new Map([['beta', 1]])); - expect(formatSort(['beta'])).to.deep.equal(new Map([['beta', 1]])); - expect(formatSort('beta', -1)).to.deep.equal(new Map([['beta', -1]])); - expect(formatSort('beta', 'desc')).to.deep.equal(new Map([['beta', -1]])); - expect(formatSort('beta', 'descending')).to.deep.equal(new Map([['beta', -1]])); - expect(formatSort({ beta: -1 })).to.deep.equal(new Map([['beta', -1]])); - expect(formatSort({ alpha: { $meta: 'hi' } })).to.deep.equal( - new Map([['alpha', { $meta: 'hi' }]]) - ); - }); - - it('formatSort - multi key', () => { - expect(formatSort(['alpha', 'beta'])).to.deep.equal( - new Map([ - ['alpha', 1], - ['beta', 1] - ]) - ); - expect(formatSort({ alpha: 1, beta: 1 })).to.deep.equal( - new Map([ - ['alpha', 1], - ['beta', 1] - ]) - ); - expect( - formatSort([ - ['alpha', 'asc'], - ['beta', 'ascending'] - ]) - ).to.deep.equal( - new Map([ - ['alpha', 1], - ['beta', 1] - ]) - ); - expect( - formatSort( - new Map([ - ['alpha', 'asc'], - ['beta', 'ascending'] - ]) - ) - ).to.deep.equal( - new Map([ - ['alpha', 1], - ['beta', 1] - ]) - ); - expect( - formatSort([ - ['3', 'asc'], - ['1', 'ascending'] - ]) - ).to.deep.equal( - new Map([ - ['3', 1], - ['1', 1] - ]) - ); - expect(formatSort({ alpha: { $meta: 'hi' }, beta: 'ascending' })).to.deep.equal( - new Map([ - ['alpha', { $meta: 'hi' }], - ['beta', 1] - ]) - ); - }); - - it('should use allowDiskUse option on sort', { - 
metadata: { requires: { mongodb: '>=4.4' } }, - test: async function () { - const events = []; - client.on('commandStarted', event => { - if (event.commandName === 'find') { - events.push(event); - } - }); - const db = client.db('test'); - const collection = db.collection('test_sort_allow_disk_use'); - const cursor = collection.find({}).sort(['alpha', 1]).allowDiskUse(); - await cursor.next(); - const { command } = events.shift(); - expect(command.sort).to.deep.equal(new Map([['alpha', 1]])); - expect(command.allowDiskUse).to.be.true; - } - }); - - it('should error if allowDiskUse option used without sort', { - metadata: { requires: { mongodb: '>=4.4' } }, - test: async function () { - const client = this.configuration.newClient(); - const db = client.db('test'); - const collection = db.collection('test_sort_allow_disk_use'); - expect(() => collection.find({}).allowDiskUse()).to.throw( - /Option "allowDiskUse" requires a sort specification/ - ); - await client.close(); - } - }); - }); -}); diff --git a/test/integration/node-specific/bson-options/ignore_undefined.test.js b/test/integration/node-specific/bson-options/ignore_undefined.test.js deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/integration/node-specific/bson-options/promote_buffers.test.js b/test/integration/node-specific/bson-options/promote_buffers.test.js deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/integration/node-specific/bson-options/promote_values.test.js b/test/integration/node-specific/bson-options/promote_values.test.js deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/test/integration/node-specific/cursor_stream.test.js b/test/integration/node-specific/cursor_stream.test.js deleted file mode 100644 index 5325fad75a5..00000000000 --- a/test/integration/node-specific/cursor_stream.test.js +++ /dev/null @@ -1,354 +0,0 @@ -'use strict'; -const { expect } = require('chai'); -const { Binary } = require('../../mongodb'); -const 
{ setTimeout, setImmediate } = require('timers'); - -describe.skip('Cursor Streams', function () { - let client; - - beforeEach(async function () { - client = this.configuration.newClient(); - }); - - afterEach(async function () { - await client.close(); - }); - - it('should stream documents with pause and resume for fetching', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function (done) { - var self = this; - var docs = []; - var j = 0; - - for (var i = 0; i < 3000; i++) { - docs.push({ a: i }); - } - - var allDocs = []; - while (docs.length > 0) { - allDocs.push(docs.splice(0, 1000)); - } - - var client = self.configuration.newClient(self.configuration.writeConcernMax(), { - maxPoolSize: 1 - }); - - client.connect(function (err, client) { - var db = client.db(self.configuration.db); - db.createCollection( - 'test_streaming_function_with_limit_for_fetching2', - function (err, collection) { - var left = allDocs.length; - for (var i = 0; i < allDocs.length; i++) { - collection.insert(allDocs[i], { writeConcern: { w: 1 } }, function (err) { - expect(err).to.not.exist; - - left = left - 1; - - if (left === 0) { - // Perform a find to get a cursor - var stream = collection.find({}).stream(); - var data = []; - - // For each data item - stream.on('data', function () { - data.push(1); - j = j + 1; - stream.pause(); - - collection.findOne({}, function (err) { - expect(err).to.not.exist; - stream.resume(); - }); - }); - - // When the stream is done - stream.on('end', function () { - setTimeout(() => { - let err; - try { - expect(data).to.have.length(3000); - } catch (e) { - err = e; - } - - client.close(() => done(err)); - }, 1000); - }); - } - }); - } - } - ); - }); - } - }); - - it('should stream 10K documents', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function (done) { - var self = this; - var docs = []; - - for 
(var i = 0; i < 10000; i++) { - docs.push({ a: i, bin: new Binary(Buffer.alloc(256)) }); - } - - var j = 0; - - var allDocs = []; - while (docs.length > 0) { - allDocs.push(docs.splice(0, 1000)); - } - - var client = self.configuration.newClient(self.configuration.writeConcernMax(), { - maxPoolSize: 1 - }); - - client.connect(function (err, client) { - var db = client.db(self.configuration.db); - db.createCollection( - 'test_streaming_function_with_limit_for_fetching_2', - function (err, collection) { - var left = allDocs.length; - for (var i = 0; i < allDocs.length; i++) { - collection.insert(allDocs[i], { writeConcern: { w: 1 } }, function (err) { - expect(err).to.not.exist; - left = left - 1; - - if (left === 0) { - // Perform a find to get a cursor - var stream = collection.find({}).stream(); - var data = []; - - // For each data item - stream.on('data', function () { - j = j + 1; - stream.pause(); - data.push(1); - - collection.findOne({}, function (err) { - expect(err).to.not.exist; - stream.resume(); - }); - }); - - // When the stream is done - stream.on('end', function () { - setTimeout(() => { - let err; - try { - expect(data).to.have.length(10000); - } catch (e) { - err = e; - } - - client.close(err2 => done(err || err2)); - }, 1000); - }); - } - }); - } - } - ); - }); - } - }); - - it('should trigger massive amount of getMores', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function (done) { - var self = this; - var docs = []; - var counter = 0; - var counter2 = 0; - - for (var i = 0; i < 1000; i++) { - docs.push({ a: i, bin: new Binary(Buffer.alloc(256)) }); - } - - var client = self.configuration.newClient(self.configuration.writeConcernMax(), { - maxPoolSize: 1 - }); - - client.connect(function (err, client) { - var db = client.db(self.configuration.db); - db.createCollection( - 'test_streaming_function_with_limit_for_fetching_3', - function (err, collection) { - 
collection.insert(docs, { writeConcern: { w: 1 } }, function (err) { - expect(err).to.not.exist; - - // Perform a find to get a cursor - var stream = collection.find({}).stream(); - - // For each data item - stream.on('data', function () { - counter++; - stream.pause(); - stream.resume(); - counter2++; - }); - - // When the stream is done - stream.on('end', function () { - expect(counter).to.equal(1000); - expect(counter2).to.equal(1000); - client.close(done); - }); - }); - } - ); - }); - } - }); - - it('should stream documents across getMore command and count correctly', async function () { - if (process.platform === 'darwin') { - this.skipReason = 'TODO(NODE-3819): Unskip flaky MacOS tests.'; - return this.skip(); - } - - const db = client.db(); - const collection = db.collection('streaming'); - const updateCollection = db.collection('update_within_streaming'); - - await collection.drop().catch(() => null); - await updateCollection.drop().catch(() => null); - - const docs = Array.from({ length: 10 }, (_, i) => ({ - _id: i, - b: new Binary(Buffer.alloc(1024)) - })); - - await collection.insertMany(docs); - // Set the batchSize to be a 5th of the total docCount to make getMores happen - const stream = collection.find({}, { batchSize: 2 }).stream(); - - let done; - const end = new Promise((resolve, reject) => { - done = error => (error != null ? 
reject(error) : resolve()); - }); - - stream.on('end', () => { - updateCollection - .findOne({ id: 1 }) - .then(function (doc) { - expect(doc.count).to.equal(9); - done(); - }) - .catch(done) - .finally(() => client.close()); - }); - - let docCount = 0; - stream.on('data', data => { - stream.pause(); - try { - expect(data).to.have.property('_id', docCount); - } catch (assertionError) { - return done(assertionError); - } - - if (docCount++ === docs.length - 1) { - stream.resume(); - return; - } - - updateCollection - .updateMany({ id: 1 }, { $inc: { count: 1 } }, { writeConcern: { w: 1 }, upsert: true }) - .then(() => { - stream.resume(); - }) - .catch(done); - }); - - return end; - }); - - it('should correctly error out stream', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function (done) { - var self = this; - var client = self.configuration.newClient(self.configuration.writeConcernMax(), { - maxPoolSize: 1 - }); - - client.connect((err, client) => { - const db = client.db(self.configuration.db); - const cursor = db.collection('myCollection').find({ - timestamp: { $ltx: '1111' } // Error in query. 
- }); - - let error; - const stream = cursor.stream(); - stream.on('error', err => (error = err)); - cursor.on('close', function () { - setImmediate(() => { - expect(error).to.exist; - client.close(done); - }); - }); - - stream.pipe(process.stdout); - }); - } - }); - - it('should correctly stream cursor after stream', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function (done) { - var self = this; - var client = self.configuration.newClient(self.configuration.writeConcernMax(), { - maxPoolSize: 1 - }); - - client.connect(function (err, client) { - var db = client.db(self.configuration.db); - var docs = []; - var received = []; - - for (var i = 0; i < 1000; i++) { - docs.push({ a: i, field: 'hello world' }); - } - - db.collection('cursor_sort_stream').insertMany(docs, function (err) { - expect(err).to.not.exist; - - var cursor = db - .collection('cursor_sort_stream') - .find({}) - .project({ a: 1 }) - .sort({ a: -1 }); - const stream = cursor.stream(); - - stream.on('end', function () { - expect(received).to.have.length(1000); - - client.close(done); - }); - - stream.on('data', function (d) { - received.push(d); - }); - }); - }); - } - }); -}); From f251f50fcb2f567be94d2bdede2745221f279bc6 Mon Sep 17 00:00:00 2001 From: Sergey Zelenov Date: Fri, 24 Oct 2025 18:16:19 +0200 Subject: [PATCH 41/41] unskip operations test --- test/integration/node-specific/operation_examples.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/node-specific/operation_examples.test.ts b/test/integration/node-specific/operation_examples.test.ts index 447db2832cd..d8e07af5462 100644 --- a/test/integration/node-specific/operation_examples.test.ts +++ b/test/integration/node-specific/operation_examples.test.ts @@ -5,7 +5,7 @@ import { enumToString } from '../../../src/utils'; import { sleep as delay } from '../../tools/utils'; import { setupDatabase } from '../shared'; 
-describe.skip('Operations', function () { +describe('Operations', function () { let client: MongoClient; beforeEach(async function () {