From 9b1c0bfcd9618ab3a698d1bf1c2cd55a030fb292 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Wed, 16 Oct 2024 16:39:29 -0400 Subject: [PATCH] docs(NODE-6413): update rst references to md --- src/cmap/auth/mongo_credentials.ts | 4 +- src/cmap/auth/mongodb_aws.ts | 2 +- src/cmap/handshake/client_metadata.ts | 2 +- src/error.ts | 14 +- src/mongo_client.ts | 2 +- src/read_concern.ts | 2 +- src/sdam/server_description.ts | 4 +- src/sdam/server_selection.ts | 7 +- src/utils.ts | 7 +- test/spec/auth/README.rst | 53 - test/spec/change-streams/README.rst | 241 --- .../client-side-encryption/tests/README.rst | 1744 ----------------- .../client-side-operations-timeout/README.rst | 616 ------ .../command-logging-and-monitoring/README.rst | 60 - .../README.rst | 228 --- test/spec/index-management/README.rst | 223 --- .../initial-dns-seedlist-discovery/README.rst | 135 -- test/spec/load-balancers/README.rst | 68 - test/spec/max-staleness/README.rst | 72 - test/spec/read-write-concern/README.rst | 68 - test/spec/retryable-reads/README.rst | 249 --- .../README.rst | 265 --- .../monitoring/README.rst | 12 - test/spec/server-selection/README.rst | 73 - test/spec/sessions/README.rst | 46 - test/spec/transactions/README.rst | 663 ------- test/spec/uri-options/README.rst | 54 - test/spec/versioned-api/README.rst | 37 - 28 files changed, 26 insertions(+), 4925 deletions(-) delete mode 100644 test/spec/auth/README.rst delete mode 100644 test/spec/change-streams/README.rst delete mode 100644 test/spec/client-side-encryption/tests/README.rst delete mode 100644 test/spec/client-side-operations-timeout/README.rst delete mode 100644 test/spec/command-logging-and-monitoring/README.rst delete mode 100644 test/spec/connection-monitoring-and-pooling/README.rst delete mode 100644 test/spec/index-management/README.rst delete mode 100644 test/spec/initial-dns-seedlist-discovery/README.rst delete mode 100644 test/spec/load-balancers/README.rst delete mode 100644 test/spec/max-staleness/README.rst delete mode 100644 test/spec/read-write-concern/README.rst delete mode 100644 test/spec/retryable-reads/README.rst delete mode 100644 test/spec/server-discovery-and-monitoring/README.rst delete mode 100644 test/spec/server-discovery-and-monitoring/monitoring/README.rst delete mode 100644 test/spec/server-selection/README.rst delete mode 100644 test/spec/sessions/README.rst delete mode 100644 test/spec/transactions/README.rst delete mode 100644 test/spec/uri-options/README.rst delete mode 100644 test/spec/versioned-api/README.rst diff --git a/src/cmap/auth/mongo_credentials.ts b/src/cmap/auth/mongo_credentials.ts index b97bb2def09..00cbfc6b004 100644 --- a/src/cmap/auth/mongo_credentials.ts +++ b/src/cmap/auth/mongo_credentials.ts @@ -10,7 +10,9 @@ import { GSSAPICanonicalizationValue } from './gssapi'; import type { OIDCCallbackFunction } from './mongodb_oidc'; import { AUTH_MECHS_AUTH_SRC_EXTERNAL, AuthMechanism } from './providers'; -// https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst +/** + * @see https://github.com/mongodb/specifications/blob/master/source/auth/auth.md + */ function getDefaultAuthMechanism(hello: Document | null): AuthMechanism { if (hello) { // If hello contains saslSupportedMechs, use scram-sha-256 diff --git a/src/cmap/auth/mongodb_aws.ts b/src/cmap/auth/mongodb_aws.ts index d034f425d17..72859f49676 100644 --- a/src/cmap/auth/mongodb_aws.ts +++ b/src/cmap/auth/mongodb_aws.ts @@ -107,7 +107,7 @@ export class MongoDBAWS extends AuthProvider { if 
(!ByteUtils.equals(serverNonce.subarray(0, nonce.byteLength), nonce)) { // throw because the serverNonce's leading 32 bytes must equal the client nonce's 32 bytes - // https://github.com/mongodb/specifications/blob/875446db44aade414011731840831f38a6c668df/source/auth/auth.rst#id11 + // https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#conversation-5 // TODO(NODE-3483) throw new MongoRuntimeError('Server nonce does not begin with client nonce'); diff --git a/src/cmap/handshake/client_metadata.ts b/src/cmap/handshake/client_metadata.ts index 090b4c7bfcf..1e825ed2bf7 100644 --- a/src/cmap/handshake/client_metadata.ts +++ b/src/cmap/handshake/client_metadata.ts @@ -11,7 +11,7 @@ const NODE_DRIVER_VERSION = require('../../../package.json').version; /** * @public - * @see https://github.com/mongodb/specifications/blob/master/source/mongodb-handshake/handshake.rst#hello-command + * @see https://github.com/mongodb/specifications/blob/master/source/mongodb-handshake/handshake.md#hello-command */ export interface ClientMetadata { driver: { diff --git a/src/error.ts b/src/error.ts index a9178389486..a3120a93880 100644 --- a/src/error.ts +++ b/src/error.ts @@ -16,14 +16,14 @@ const kErrorLabels = Symbol('errorLabels'); /** * @internal * The legacy error message from the server that indicates the node is not a writable primary - * https://github.com/mongodb/specifications/blob/b07c26dc40d04ac20349f989db531c9845fdd755/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-writable-primary-and-node-is-recovering + * https://github.com/mongodb/specifications/blob/921232976f9913cf17415b5ef937ee772e45e6ae/source/server-discovery-and-monitoring/server-discovery-and-monitoring.md#not-writable-primary-and-node-is-recovering */ export const LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE = new RegExp('not master', 'i'); /** * @internal * The legacy error message from the server that indicates the node is not a primary or secondary - * https://github.com/mongodb/specifications/blob/b07c26dc40d04ac20349f989db531c9845fdd755/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-writable-primary-and-node-is-recovering + * https://github.com/mongodb/specifications/blob/921232976f9913cf17415b5ef937ee772e45e6ae/source/server-discovery-and-monitoring/server-discovery-and-monitoring.md#not-writable-primary-and-node-is-recovering */ export const LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE = new RegExp( 'not master or secondary', @@ -33,7 +33,7 @@ export const LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE = new RegExp( /** * @internal * The error message from the server that indicates the node is recovering - * https://github.com/mongodb/specifications/blob/b07c26dc40d04ac20349f989db531c9845fdd755/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-writable-primary-and-node-is-recovering + * https://github.com/mongodb/specifications/blob/921232976f9913cf17415b5ef937ee772e45e6ae/source/server-discovery-and-monitoring/server-discovery-and-monitoring.md#not-writable-primary-and-node-is-recovering */ export const NODE_IS_RECOVERING_ERROR_MESSAGE = new RegExp('node is recovering', 'i'); @@ -69,7 +69,7 @@ export const MONGODB_ERROR_CODES = Object.freeze({ ReadConcernMajorityNotAvailableYet: 134 } as const); -// From spec@https://github.com/mongodb/specifications/blob/f93d78191f3db2898a59013a7ed5650352ef6da8/source/change-streams/change-streams.rst#resumable-error +// From spec 
https://github.com/mongodb/specifications/blob/921232976f9913cf17415b5ef937ee772e45e6ae/source/change-streams/change-streams.md#resumable-error export const GET_MORE_RESUMABLE_CODES = new Set([ MONGODB_ERROR_CODES.HostUnreachable, MONGODB_ERROR_CODES.HostNotFound, @@ -1303,7 +1303,7 @@ export class MongoWriteConcernError extends MongoServerError { } } -// https://github.com/mongodb/specifications/blob/master/source/retryable-reads/retryable-reads.rst#retryable-error +// https://github.com/mongodb/specifications/blob/master/source/retryable-reads/retryable-reads.md#retryable-error const RETRYABLE_READ_ERROR_CODES = new Set([ MONGODB_ERROR_CODES.HostUnreachable, MONGODB_ERROR_CODES.HostNotFound, @@ -1320,7 +1320,7 @@ const RETRYABLE_READ_ERROR_CODES = new Set([ MONGODB_ERROR_CODES.ReadConcernMajorityNotAvailableYet ]); -// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms +// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.md#terms const RETRYABLE_WRITE_ERROR_CODES = RETRYABLE_READ_ERROR_CODES; export function needsRetryableWriteLabel( @@ -1457,7 +1457,7 @@ export function isNodeShuttingDownError(err: MongoError): boolean { * then the pool will be cleared, and server state will completely reset * locally. * - * @see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-master-and-node-is-recovering + * @see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.md#not-writable-primary-and-node-is-recovering */ export function isSDAMUnrecoverableError(error: MongoError): boolean { // NOTE: null check is here for a strictly pre-CMAP world, a timeout or diff --git a/src/mongo_client.ts b/src/mongo_client.ts index 49201910362..5ad91ecdcb4 100644 --- a/src/mongo_client.ts +++ b/src/mongo_client.ts @@ -251,7 +251,7 @@ export interface MongoClientOptions extends BSONSerializeOptions, SupportedNodeC * * @remarks * Automatic encryption is an enterprise only feature that only applies to operations on a collection. Automatic encryption is not supported for operations on a database or view, and operations that are not bypassed will result in error - * (see [libmongocrypt: Auto Encryption Allow-List](https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/client-side-encryption.rst#libmongocrypt-auto-encryption-allow-list)). To bypass automatic encryption for all operations, set bypassAutoEncryption=true in AutoEncryptionOpts. + * (see [libmongocrypt: Auto Encryption Allow-List](https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/client-side-encryption.md#libmongocrypt-auto-encryption-allow-list)). To bypass automatic encryption for all operations, set bypassAutoEncryption=true in AutoEncryptionOpts. * * Automatic encryption requires the authenticated user to have the [listCollections privilege action](https://www.mongodb.com/docs/manual/reference/command/listCollections/#dbcmd.listCollections). * diff --git a/src/read_concern.ts b/src/read_concern.ts index 93c5688a443..1118c377f5f 100644 --- a/src/read_concern.ts +++ b/src/read_concern.ts @@ -31,7 +31,7 @@ export class ReadConcern { * A spec test exists that allows level to be any string. 
* "invalid readConcern with out stage" * @see ./test/spec/crud/v2/aggregate-out-readConcern.json - * @see https://github.com/mongodb/specifications/blob/master/source/read-write-concern/read-write-concern.rst#unknown-levels-and-additional-options-for-string-based-readconcerns + * @see https://github.com/mongodb/specifications/blob/master/source/read-write-concern/read-write-concern.md#unknown-levels-and-additional-options-for-string-based-readconcerns */ this.level = ReadConcernLevel[level] ?? level; } diff --git a/src/sdam/server_description.ts b/src/sdam/server_description.ts index aadf523d722..73f4d6354ad 100644 --- a/src/sdam/server_description.ts +++ b/src/sdam/server_description.ts @@ -159,8 +159,8 @@ export class ServerDescription { } /** - * Determines if another `ServerDescription` is equal to this one per the rules defined - * in the {@link https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#serverdescription|SDAM spec} + * Determines if another `ServerDescription` is equal to this one per the rules defined in the SDAM specification. + * @see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.md */ equals(other?: ServerDescription | null): boolean { // Despite using the comparator that would determine a nullish topologyVersion as greater than diff --git a/src/sdam/server_selection.ts b/src/sdam/server_selection.ts index bb262efa337..409ef646ddf 100644 --- a/src/sdam/server_selection.ts +++ b/src/sdam/server_selection.ts @@ -75,7 +75,9 @@ export function secondaryWritableServerSelector( /** * Reduces the passed in array of servers by the rules of the "Max Staleness" specification - * found here: https://github.com/mongodb/specifications/blob/master/source/max-staleness/max-staleness.rst + * found here: + * + * @see https://github.com/mongodb/specifications/blob/master/source/max-staleness/max-staleness.md * * @param readPreference - The read preference providing max staleness guidance * @param topologyDescription - The topology description @@ -212,7 +214,8 @@ function tagSetReducer( /** * Reduces a list of servers to ensure they fall within an acceptable latency window. This is * further specified in the "Server Selection" specification, found here: - * https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst + * + * @see https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.md * * @param topologyDescription - The topology description * @param servers - The list of servers to reduce diff --git a/src/utils.ts b/src/utils.ts index 6d5a67d3ab9..6bc1b1d3008 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -1097,8 +1097,11 @@ export function shuffle(sequence: Iterable, limit = 0): Array { return limit % items.length === 0 ? 
items : items.slice(lowerBound); } -// TODO(NODE-4936): read concern eligibility for commands should be codified in command construction -// @see https://github.com/mongodb/specifications/blob/master/source/read-write-concern/read-write-concern.rst#read-concern +/** + * TODO(NODE-4936): read concern eligibility for commands should be codified in command construction + * @internal + * @see https://github.com/mongodb/specifications/blob/master/source/read-write-concern/read-write-concern.md#read-concern + */ export function commandSupportsReadConcern(command: Document): boolean { if (command.aggregate || command.count || command.distinct || command.find || command.geoNear) { return true; diff --git a/test/spec/auth/README.rst b/test/spec/auth/README.rst deleted file mode 100644 index 3bf86f4fb1f..00000000000 --- a/test/spec/auth/README.rst +++ /dev/null @@ -1,53 +0,0 @@ -========== -Auth Tests -========== - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the Auth Spec at least with -respect to connection string URI input. - -Drivers should do additional unit testing if there are alternate ways of -configuring credentials on a client. - -Driver must also conduct the prose tests in the Auth Spec test plan section. - -Format ------- - -Each YAML file contains an object with a single ``tests`` key. This key is an -array of test case objects, each of which have the following keys: - -- ``description``: A string describing the test. -- ``uri``: A string containing the URI to be parsed. -- ``valid:`` A boolean indicating if the URI should be considered valid. -- ``credential``: If null, the credential must not be considered configured for the - the purpose of deciding if the driver should authenticate to the topology. If non-null, - it is an object containing one or more of the following properties of a credential: - - - ``username``: A string containing the username. For auth mechanisms - that do not utilize a password, this may be the entire ``userinfo`` token - from the connection string. - - ``password``: A string containing the password. - - ``source``: A string containing the authentication database. - - ``mechanism``: A string containing the authentication mechanism. A null value for - this key is used to indicate that a mechanism wasn't specified and that mechanism - negotiation is required. Test harnesses should modify the mechanism test as needed - to assert this condition. - - ``mechanism_properties``: A document containing mechanism-specific properties. It - specifies a subset of properties that must match. If a key exists in the test data, - it must exist with the corresponding value in the credential. Other values may - exist in the credential without failing the test. - -If any key is missing, no assertion about that key is necessary. Except as -specified explicitly above, if a key is present, but the test value is null, -the observed value for that key must be uninitialized (whatever that means for -a given driver and data type). - -Implementation notes -==================== - -Testing whether a URI is valid or not should simply be a matter of checking -whether URI parsing (or MongoClient construction) raises an error or exception. - -If a credential is configured, its properties must be compared to the -``credential`` field. 
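As an illustration of the format above (not taken from any driver's actual harness), a runner for these connection-string cases might look like the following sketch. The ``AuthTestCase`` shape mirrors the keys described earlier, and ``extractCredential`` is a hypothetical helper standing in for however a given driver exposes the credential it parsed.

.. code:: typescript

   import { deepStrictEqual } from 'node:assert';
   import { MongoClient } from 'mongodb';

   interface AuthTestCase {
     description: string;
     uri: string;
     valid: boolean;
     credential?: Record<string, unknown> | null;
   }

   // Hypothetical helper: maps a constructed client back onto the test file's credential shape.
   declare function extractCredential(client: MongoClient): Record<string, unknown> | undefined;

   function parseClient(uri: string): MongoClient | null {
     try {
       // URI validity is proven by whether parsing / client construction throws.
       return new MongoClient(uri);
     } catch {
       return null;
     }
   }

   function runAuthTestCase(test: AuthTestCase): void {
     const client = parseClient(test.uri);
     if (client === null) {
       if (test.valid) throw new Error(`expected "${test.uri}" to parse: ${test.description}`);
       return; // invalid URI rejected, as expected
     }
     if (!test.valid) {
       throw new Error(`expected "${test.uri}" to be rejected: ${test.description}`);
     }

     const actual = extractCredential(client);
     if (test.credential === null) {
       // A null credential means auth must not be considered configured.
       deepStrictEqual(actual, undefined);
     } else if (test.credential !== undefined) {
       // Only keys present in the test file are asserted; missing keys imply no assertion.
       for (const [key, expected] of Object.entries(test.credential)) {
         deepStrictEqual(actual?.[key], expected);
       }
     }
   }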
diff --git a/test/spec/change-streams/README.rst b/test/spec/change-streams/README.rst deleted file mode 100644 index 4e3bfbc94aa..00000000000 --- a/test/spec/change-streams/README.rst +++ /dev/null @@ -1,241 +0,0 @@ -.. role:: javascript(code) - :language: javascript - -============== -Change Streams -============== - -.. contents:: - --------- - -Introduction -============ - -The YAML and JSON files in this directory are platform-independent tests that -drivers can use to prove their conformance to the Change Streams Spec. - -Several prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Subdirectories for Test Formats -------------------------------- - -This document describes the legacy format for change streams tests. -Tests in this legacy format are located under ``./legacy/``. - -New change streams tests should be written in the `unified test format <../../unified-test-format/unified-test-format.rst>`__ -and placed under ``./unified/``. - -Spec Test Format -================ - -Each YAML file has the following keys: - -- ``database_name``: The default database -- ``collection_name``: The default collection -- ``database2_name``: Another database -- ``collection2_name``: Another collection -- ``tests``: An array of tests that are to be run independently of each other. - Each test will have some of the following fields: - - - ``description``: The name of the test. - - ``minServerVersion``: The minimum server version to run this test against. If not present, assume there is no minimum server version. - - ``maxServerVersion``: Reserved for later use - - ``failPoint``: Optional configureFailPoint command document to run to configure a fail point on the primary server. - - ``target``: The entity on which to run the change stream. Valid values are: - - - ``collection``: Watch changes on collection ``database_name.collection_name`` - - ``database``: Watch changes on database ``database_name`` - - ``client``: Watch changes on entire clusters - - ``topology``: An array of server topologies against which to run the test. - Valid topologies are ``single``, ``replicaset``, ``sharded``, and "load-balanced". - - ``changeStreamPipeline``: An array of additional aggregation pipeline stages to add to the change stream - - ``changeStreamOptions``: Additional options to add to the changeStream - - ``operations``: Array of documents, each describing an operation. 
Each document has the following fields: - - - ``database``: Database against which to run the operation - - ``collection``: Collection against which to run the operation - - ``name``: Name of the command to run - - ``arguments`` (optional): Object of arguments for the command (ex: document to insert) - - - ``expectations``: Optional list of command-started events in Extended JSON format - - ``result``: Document with ONE of the following fields: - - - ``error``: Describes an error received during the test - - ``success``: An Extended JSON array of documents expected to be received from the changeStream - -Spec Test Match Function -======================== - -The definition of MATCH or MATCHES in the Spec Test Runner is as follows: - -- MATCH takes two values, ``expected`` and ``actual`` -- Notation is "Assert [actual] MATCHES [expected] -- Assertion passes if ``expected`` is a subset of ``actual``, with the value ``42`` acting as placeholders for "any value" - -Pseudocode implementation of ``actual`` MATCHES ``expected``: - -:: - - If expected is "42" or 42: - Assert that actual exists (is not null or undefined) - Else: - Assert that actual is of the same JSON type as expected - If expected is a JSON array: - For every idx/value in expected: - Assert that actual[idx] MATCHES value - Else if expected is a JSON object: - For every key/value in expected - Assert that actual[key] MATCHES value - Else: - Assert that expected equals actual - -The expected values for ``result.success`` and ``expectations`` are written in Extended JSON. Drivers may adopt any of the following approaches to comparisons, as long as they are consistent: - -- Convert ``actual`` to Extended JSON and compare to ``expected`` -- Convert ``expected`` and ``actual`` to BSON, and compare them -- Convert ``expected`` and ``actual`` to native equivalents of JSON, and compare them - -Spec Test Runner -================ - -Before running the tests - -- Create a MongoClient ``globalClient``, and connect to the server. -When executing tests against a sharded cluster, ``globalClient`` must only connect to one mongos. This is because tests -that set failpoints will only work consistently if both the ``configureFailPoint`` and failing commands are sent to the -same mongos. - -For each YAML file, for each element in ``tests``: - -- If ``topology`` does not include the topology of the server instance(s), skip this test. -- Use ``globalClient`` to - - - Drop the database ``database_name`` - - Drop the database ``database2_name`` - - Create the database ``database_name`` and the collection ``database_name.collection_name`` - - Create the database ``database2_name`` and the collection ``database2_name.collection2_name`` - - If the the ``failPoint`` field is present, configure the fail point on the primary server. See - `Server Fail Point <../../transactions/tests#server-fail-point>`_ in the - Transactions spec test documentation for more information. - -- Create a new MongoClient ``client`` -- Begin monitoring all APM events for ``client``. (If the driver uses global listeners, filter out all events that do not originate with ``client``). Filter out any "internal" commands (e.g. ``isMaster``) -- Using ``client``, create a changeStream ``changeStream`` against the specified ``target``. Use ``changeStreamPipeline`` and ``changeStreamOptions`` if they are non-empty. Capture any error. 
-- If there was no error, use ``globalClient`` and run every operation in ``operations`` in serial against the server until all operations have been executed or an error is thrown. Capture any error. -- If there was no error and ``result.error`` is set, iterate ``changeStream`` once and capture any error. -- If there was no error and ``result.success`` is non-empty, iterate ``changeStream`` until it returns as many changes as there are elements in the ``result.success`` array or an error is thrown. Capture any error. -- Close ``changeStream`` -- If there was an error: - - - Assert that an error was expected for the test. - - Assert that the error MATCHES ``result.error`` - -- Else: - - - Assert that no error was expected for the test - - Assert that the changes received from ``changeStream`` MATCH the results in ``result.success`` - -- If there are any ``expectations`` - - - For each (``expected``, ``idx``) in ``expectations`` - - If ``actual[idx]`` is a ``killCursors`` event, skip it and move to ``actual[idx+1]``. - - Else assert that ``actual[idx]`` MATCHES ``expected`` - - Note: the change stream test command event expectations cover a - prefix subset of all command events published by the driver. - The test runner MUST verify that, if there are N expectations, that the - first N events published by the driver match the expectations, and - MUST NOT inspect any subsequent events published by the driver. - -- Close the MongoClient ``client`` - -After running all tests - -- Close the MongoClient ``globalClient`` -- Drop database ``database_name`` -- Drop database ``database2_name`` - -Iterating the Change Stream ---------------------------- - -Although synchronous drivers must provide a `non-blocking mode of iteration <../change-streams.rst#not-blocking-on-iteration>`_, asynchronous drivers may not have such a mechanism. Those drivers with only a blocking mode of iteration should be careful not to iterate the change stream unnecessarily, as doing so could cause the test runner to block indefinitely. For this reason, the test runner procedure above advises drivers to take a conservative approach to iteration. - -If the test expects an error and one was not thrown by either creating the change stream or executing the test's operations, iterating the change stream once allows for an error to be thrown by a ``getMore`` command. If the test does not expect any error, the change stream should be iterated only until it returns as many result documents as are expected by the test. - -Testing on Sharded Clusters ---------------------------- - -When writing data on sharded clusters, majority-committed data does not always show up in the response of the first -``getMore`` command after the data is written. This is because in sharded clusters, no data from shard A may be returned -until all other shard reports an entry that sorts after the change in shard A. - -To account for this, drivers MUST NOT rely on change stream documents in certain batches. For example, if expecting two -documents in a change stream, these may not be part of the same ``getMore`` response, or even be produced in two -subsequent ``getMore`` responses. Drivers MUST allow for a ``getMore`` to produce empty batches when testing on a -sharded cluster. By default, this can take up to 10 seconds, but can be controlled by enabling the ``writePeriodicNoops`` -server parameter and configuring the ``periodNoopIntervalSecs`` parameter. Choosing lower values allows for running -change stream tests with smaller timeouts. 
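As a sketch of how a runner can tolerate empty batches (assuming the driver's promise-based ``ChangeStream.tryNext()``; the helper name and timeout below are illustrative, not part of the spec), iteration can simply continue until the expected number of changes arrives or a deadline passes:

.. code:: typescript

   import type { ChangeStream, ChangeStreamDocument } from 'mongodb';

   async function collectChanges(
     changeStream: ChangeStream,
     expectedCount: number,
     timeoutMS = 10_000
   ): Promise<ChangeStreamDocument[]> {
     const changes: ChangeStreamDocument[] = [];
     const deadline = Date.now() + timeoutMS;
     while (changes.length < expectedCount) {
       // tryNext() resolves null when no change is currently available
       // (for example, after a getMore that returned an empty batch),
       // so an empty batch simply causes another iteration of the loop.
       const change = await changeStream.tryNext();
       if (change != null) changes.push(change);
       if (Date.now() > deadline) {
         throw new Error(`expected ${expectedCount} changes, received ${changes.length} before the deadline`);
       }
     }
     return changes;
   }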
- -Prose Tests -=========== - -The following tests have not yet been automated, but MUST still be tested. All tests SHOULD be run on both replica sets and sharded clusters unless otherwise specified: - -#. ``ChangeStream`` must continuously track the last seen ``resumeToken`` -#. ``ChangeStream`` will throw an exception if the server response is missing the resume token (if wire version is < 8, this is a driver-side error; for 8+, this is a server-side error) -#. After receiving a ``resumeToken``, ``ChangeStream`` will automatically resume one time on a resumable error with the initial pipeline and options, except for the addition/update of a ``resumeToken``. -#. ``ChangeStream`` will not attempt to resume on any error encountered while executing an ``aggregate`` command. Note that retryable reads may retry ``aggregate`` commands. Drivers should be careful to distinguish retries from resume attempts. Alternatively, drivers may specify ``retryReads=false`` or avoid using a `retryable error <../../retryable-reads/retryable-reads.rst#retryable-error>`_ for this test. -#. **Removed** -#. ``ChangeStream`` will perform server selection before attempting to resume, using initial ``readPreference`` -#. Ensure that a cursor returned from an aggregate command with a cursor id and an initial empty batch is not closed on the driver side. -#. The ``killCursors`` command sent during the "Resume Process" must not be allowed to throw an exception. -#. ``$changeStream`` stage for ``ChangeStream`` against a server ``>=4.0`` and ``<4.0.7`` that has not received any results yet MUST include a ``startAtOperationTime`` option when resuming a change stream. -#. **Removed** -#. For a ``ChangeStream`` under these conditions: - - - Running against a server ``>=4.0.7``. - - The batch is empty or has been iterated to the last document. - - Expected result: - - - ``getResumeToken`` must return the ``postBatchResumeToken`` from the current command response. - -#. For a ``ChangeStream`` under these conditions: - - - Running against a server ``<4.0.7``. - - The batch is empty or has been iterated to the last document. - - Expected result: - - - ``getResumeToken`` must return the ``_id`` of the last document returned if one exists. - - ``getResumeToken`` must return ``resumeAfter`` from the initial aggregate if the option was specified. - - If ``resumeAfter`` was not specified, the ``getResumeToken`` result must be empty. - -#. For a ``ChangeStream`` under these conditions: - - - The batch is not empty. - - The batch has been iterated up to but not including the last element. - - Expected result: - - - ``getResumeToken`` must return the ``_id`` of the previous document returned. - -#. For a ``ChangeStream`` under these conditions: - - - The batch is not empty. - - The batch hasn’t been iterated at all. - - Only the initial ``aggregate`` command has been executed. - - Expected result: - - - ``getResumeToken`` must return ``startAfter`` from the initial aggregate if the option was specified. - - ``getResumeToken`` must return ``resumeAfter`` from the initial aggregate if the option was specified. - - If neither the ``startAfter`` nor ``resumeAfter`` options were specified, the ``getResumeToken`` result must be empty. - - Note that this test cannot be run against sharded topologies because in that case the initial ``aggregate`` command only establishes cursors on the shards and always returns an empty ``firstBatch``. - -#. **Removed** -#. **Removed** -#. 
``$changeStream`` stage for ``ChangeStream`` started with ``startAfter`` against a server ``>=4.1.1`` that has not received any results yet MUST include a ``startAfter`` option and MUST NOT include a ``resumeAfter`` option when resuming a change stream. -#. ``$changeStream`` stage for ``ChangeStream`` started with ``startAfter`` against a server ``>=4.1.1`` that has received at least one result MUST include a ``resumeAfter`` option and MUST NOT include a ``startAfter`` option when resuming a change stream. diff --git a/test/spec/client-side-encryption/tests/README.rst b/test/spec/client-side-encryption/tests/README.rst deleted file mode 100644 index c3502568753..00000000000 --- a/test/spec/client-side-encryption/tests/README.rst +++ /dev/null @@ -1,1744 +0,0 @@ -============================ -Client Side Encryption Tests -============================ - -.. contents:: - ----- - -Introduction -============ - -This document describes the format of the driver spec tests included in the -JSON and YAML files included in this directory. The -``timeoutMS.yml``/``timeoutMS.json`` files in this directory contain tests -for the ``timeoutMS`` option and its application to the client-side -encryption feature. Drivers MUST only run these tests after implementing the -`Client Side Operations Timeout -<../client-side-operations-timeout/client-side-operations-timeout.rst>`__ -specification. - -Additional prose tests, that are not represented in the spec tests, are described -and MUST be implemented by all drivers. - -Spec Test Format -================ - -The spec tests format is an extension of `transactions spec tests `_ with some additions: - -- A ``json_schema`` to set on the collection used for operations. - -- An ``encrypted_fields`` to set on the collection used for operations. - -- A ``key_vault_data`` of data that should be inserted in the key vault collection before each test. - -- Introduction ``autoEncryptOpts`` to `clientOptions` - -- Addition of `$db` to command in `command_started_event` - -- Addition of `$$type` to command_started_event and outcome. - -The semantics of `$$type` is that any actual value matching one of the types indicated by either a BSON type string -or an array of BSON type strings is considered a match. - -For example, the following matches a command_started_event for an insert of a document where `random` must be of type ``binData``:: - - - command_started_event: - command: - insert: *collection_name - documents: - - { random: { $$type: "binData" } } - ordered: true - command_name: insert - -The following matches a command_started_event for an insert of a document where ``random`` must be of type -``binData`` or ``string``:: - - - command_started_event: - command: - insert: *collection_name - documents: - - { random: { $$type: ["binData", "string"] } } - ordered: true - command_name: insert - -The values of `$$type` correspond to `these documented string representations of BSON types `_. - - -Each YAML file has the following keys: - -.. |txn| replace:: Unchanged from Transactions spec tests. - -- ``runOn`` |txn| - -- ``database_name`` |txn| - -- ``collection_name`` |txn| - -- ``data`` |txn| - -- ``json_schema`` A JSON Schema that should be set on the collection (using ``createCollection``) before each test run. - -- ``encrypted_fields`` An encryptedFields option that should be set on the collection (using ``createCollection``) before each test run. - -- ``key_vault_data`` The data that should exist in the key vault collection under test before each test run. 
- -- ``tests``: An array of tests that are to be run independently of each other. - Each test will have some or all of the following fields: - - - ``description``: |txn| - - - ``skipReason``: |txn| - - - ``useMultipleMongoses``: |txn| - - - ``failPoint``: |txn| - - - ``clientOptions``: Optional, parameters to pass to MongoClient(). - - - ``autoEncryptOpts``: Optional - - - ``kmsProviders`` A dictionary of KMS providers to set on the key vault ("aws" or "local") - - - ``aws`` The AWS KMS provider. An empty object. Drivers MUST fill in AWS credentials (`accessKeyId`, `secretAccessKey`) from the environment. - - - ``azure`` The Azure KMS provider credentials. An empty object. Drivers MUST fill in Azure credentials (`tenantId`, `clientId`, and `clientSecret`) from the environment. - - - ``gcp`` The GCP KMS provider credentials. An empty object. Drivers MUST fill in GCP credentials (`email`, `privateKey`) from the environment. - - - ``local`` The local KMS provider. - - - ``key`` A 96 byte local key. - - - ``kmip`` The KMIP KMS provider credentials. An empty object. Drivers MUST fill in KMIP credentials (`endpoint`, and TLS options). - - - ``schemaMap``: Optional, a map from namespaces to local JSON schemas. - - - ``keyVaultNamespace``: Optional, a namespace to the key vault collection. Defaults to "keyvault.datakeys". - - - ``bypassAutoEncryption``: Optional, a boolean to indicate whether or not auto encryption should be bypassed. Defaults to ``false``. - - - ``encryptedFieldsMap`` An optional document. The document maps collection namespace to ``EncryptedFields`` documents. - - - ``operations``: Array of documents, each describing an operation to be - executed. Each document has the following fields: - - - ``name``: |txn| - - - ``object``: |txn|. Defaults to "collection" if omitted. - - - ``collectionOptions``: |txn| - - - ``command_name``: |txn| - - - ``arguments``: |txn| - - - ``result``: Same as the Transactions spec test format with one addition: if the operation is expected to return - an error, the ``result`` document may contain an ``isTimeoutError`` boolean field. If ``true``, the test runner - MUST assert that the error represents a timeout due to the use of the ``timeoutMS`` option. If ``false``, the - test runner MUST assert that the error does not represent a timeout. - - - ``expectations``: |txn| - - - ``outcome``: |txn| - - - -Use as integration tests -======================== - -Do the following before running spec tests: - -- If available for the platform under test, obtain a csfle_ binary and place it - in a location accessible to the tests. Refer to: `Using csfle`_ -- Start the mongocryptd process. -- Start a mongod process with **server version 4.1.9 or later**. -- Place credentials to an AWS IAM user (access key ID + secret access key) somewhere in the environment outside of tracked code. (If testing on evergreen, project variables are a good place). -- Start a KMIP test server on port 5698 by running `drivers-evergreen-tools/.evergreen/csfle/kms_kmip_server.py `_. - -.. _csfle: ../client-side-encryption.rst#csfle - -Load each YAML (or JSON) file using a Canonical Extended JSON parser. - -Then for each element in ``tests``: - -#. If the ``skipReason`` field is present, skip this test completely. -#. If the ``key_vault_data`` field is present: - - #. Drop the ``keyvault.datakeys`` collection using writeConcern "majority". - #. Insert the data specified into the ``keyvault.datakeys`` with write concern "majority". - -#. Create a MongoClient. - -#. 
Create a collection object from the MongoClient, using the ``database_name`` - and ``collection_name`` fields from the YAML file. Drop the collection - with writeConcern "majority". If a ``json_schema`` is defined in the test, - use the ``createCollection`` command to explicitly create the collection: - - .. code:: typescript - - {"create": , "validator": {"$jsonSchema": }} - - If ``encrypted_fields`` is defined in the test, the required collections and index described in `FLE 2 CreateCollection() and Collection.Drop() `_ must be created: - - Use the ``dropCollection`` helper with ``encrypted_fields`` as an option and writeConcern "majority". - - Use the ``createCollection`` helper with ``encrypted_fields`` as an option. - -#. If the YAML file contains a ``data`` array, insert the documents in ``data`` - into the test collection, using writeConcern "majority". - -#. Create a **new** MongoClient using ``clientOptions``. - - #. If ``autoEncryptOpts`` includes ``aws``, ``awsTemporary``, ``awsTemporaryNoSessionToken``, - ``azure``, ``gcp``, and/or ``kmip`` as a KMS provider, pass in credentials from the environment. - - - ``awsTemporary``, and ``awsTemporaryNoSessionToken`` require temporary - AWS credentials. These can be retrieved using the csfle `set-temp-creds.sh - `_ - script. - - - ``aws``, ``awsTemporary``, and ``awsTemporaryNoSessionToken`` are - mutually exclusive. - - ``aws`` should be substituted with: - - .. code:: javascript - - "aws": { - "accessKeyId": , - "secretAccessKey": - } - - ``awsTemporary`` should be substituted with: - - .. code:: javascript - - "aws": { - "accessKeyId": , - "secretAccessKey": - "sessionToken": - } - - ``awsTemporaryNoSessionToken`` should be substituted with: - - .. code:: javascript - - "aws": { - "accessKeyId": , - "secretAccessKey": - } - - ``gcp`` should be substituted with: - - .. code:: javascript - - "gcp": { - "email": , - "privateKey": , - } - - ``azure`` should be substituted with: - - .. code:: javascript - - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - } - - ``local`` should be substituted with: - - .. code:: javascript - - "local": { "key": } - - ``kmip`` should be substituted with: - - .. code:: javascript - - "kmip": { "endpoint": "localhost:5698" } - - Configure KMIP TLS connections to use the following options: - - - ``tlsCAFile`` (or equivalent) set to `drivers-evergreen-tools/.evergreen/x509gen/ca.pem `_. This MAY be configured system-wide. - - ``tlsCertificateKeyFile`` (or equivalent) set to `drivers-evergreen-tools/.evergreen/x509gen/client.pem `_. - - The method of passing TLS options for KMIP TLS connections is driver dependent. - - #. If ``autoEncryptOpts`` does not include ``keyVaultNamespace``, default it - to ``keyvault.datakeys``. - -#. For each element in ``operations``: - - - Enter a "try" block or your programming language's closest equivalent. - - Create a Database object from the MongoClient, using the ``database_name`` - field at the top level of the test file. - - Create a Collection object from the Database, using the - ``collection_name`` field at the top level of the test file. - If ``collectionOptions`` is present create the Collection object with the - provided options. Otherwise create the object with the default options. - - Execute the named method on the provided ``object``, passing the - arguments listed. - - If the driver throws an exception / returns an error while executing this - series of operations, store the error message and server error code. 
- - If the result document has an "errorContains" field, verify that the - method threw an exception or returned an error, and that the value of the - "errorContains" field matches the error string. "errorContains" is a - substring (case-insensitive) of the actual error message. - - If the result document has an "errorCodeName" field, verify that the - method threw a command failed exception or returned an error, and that - the value of the "errorCodeName" field matches the "codeName" in the - server error response. - - If the result document has an "errorLabelsContain" field, verify that the - method threw an exception or returned an error. Verify that all of the - error labels in "errorLabelsContain" are present in the error or exception - using the ``hasErrorLabel`` method. - - If the result document has an "errorLabelsOmit" field, verify that the - method threw an exception or returned an error. Verify that none of the - error labels in "errorLabelsOmit" are present in the error or exception - using the ``hasErrorLabel`` method. - - If the operation returns a raw command response, eg from ``runCommand``, - then compare only the fields present in the expected result document. - Otherwise, compare the method's return value to ``result`` using the same - logic as the CRUD Spec Tests runner. - -#. If the test includes a list of command-started events in ``expectations``, - compare them to the actual command-started events using the - same logic as the Command Monitoring Spec Tests runner. - -#. For each element in ``outcome``: - - - If ``name`` is "collection", create a new MongoClient *without encryption* - and verify that the test collection contains exactly the documents in the - ``data`` array. Ensure this find reads the latest data by using - **primary read preference** with **local read concern** even when the - MongoClient is configured with another read preference or read concern. - -The spec test MUST be run with *and* without auth. - - -Using ``csfle`` -=============== - -On platforms where csfle_ is available, drivers should prefer to test with the -csfle library instead of spawning mongocryptd, although having some tests -dedicated to mongocryptd is recommended. Note that some tests assert on -mongocryptd-related behaviors (e.g. the ``mongocryptdBypassSpawn`` test). - -Drivers under test should load the csfle_ library using either the ``csflePath`` -public API option (as part of the AutoEncryption ``extraOptions``), or by -setting a special search path instead. - -Some tests will require *not* using csfle_. For such tests, one should ensure -that csfle will not be loaded. Refer to the client-side-encryption documentation -for information on "disabling" csfle and setting csfle search paths. - -.. note:: - - At time of writing, csfle_ does not properly handle the ``explain`` - command and will fail to parse it. This will cause the ``explain`` test case - to fail if ``csfle`` is in use instead of ``mongocryptd``. - -.. note:: - - The ``csfle`` dynamic library can be obtained using the mongodl_ Python - script from drivers-evergreen-tools_: - - .. code-block:: shell - - $ python3 mongodl.py --component=csfle --version=5.3.1 --out=./csfle/ - -.. _mongodl: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/mongodl.py -.. _drivers-evergreen-tools: https://github.com/mongodb-labs/drivers-evergreen-tools/ - - - -Prose Tests -=========== - -Tests for the ClientEncryption type are not included as part of the YAML tests. 
- -In the prose tests LOCAL_MASTERKEY refers to the following base64: - -.. code:: javascript - - Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk - -Perform all applicable operations on key vault collections (e.g. inserting an example data key, or running a find command) with readConcern/writeConcern "majority". - -Data key and double encryption -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -First, perform the setup. - -#. Create a MongoClient without encryption enabled (referred to as ``client``). Enable command monitoring to listen for command_started events. - -#. Using ``client``, drop the collections ``keyvault.datakeys`` and ``db.coll``. - -#. Create the following: - - - A MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - A ``ClientEncryption`` object (referred to as ``client_encryption``) - - Configure both objects with the following KMS providers: - - .. code:: javascript - - { - "aws": { - "accessKeyId": , - "secretAccessKey": - }, - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - }, - "gcp": { - "email": , - "privateKey": , - } - "local": { "key": }, - "kmip": { "endpoint": "localhost:5698" } - } - - Configure KMIP TLS connections to use the following options: - - - ``tlsCAFile`` (or equivalent) set to `drivers-evergreen-tools/.evergreen/x509gen/ca.pem `_. This MAY be configured system-wide. - - ``tlsCertificateKeyFile`` (or equivalent) set to `drivers-evergreen-tools/.evergreen/x509gen/client.pem `_. - - The method of passing TLS options for KMIP TLS connections is driver dependent. - - Configure both objects with ``keyVaultNamespace`` set to ``keyvault.datakeys``. - - Configure the ``MongoClient`` with the following ``schema_map``: - - .. code:: javascript - - { - "db.coll": { - "bsonType": "object", - "properties": { - "encrypted_placeholder": { - "encrypt": { - "keyId": "/placeholder", - "bsonType": "string", - "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" - } - } - } - } - } - - Configure ``client_encryption`` with the ``keyVaultClient`` of the previously created ``client``. - -For each KMS provider (``aws``, ``azure``, ``gcp``, ``local``, and ``kmip``), referred to as ``provider_name``, run the following test. - -#. Call ``client_encryption.createDataKey()``. - - - Set keyAltNames to ``["_altname"]``. - - Set the masterKey document based on ``provider_name``. - - For "aws": - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" - } - - For "azure": - - .. code:: javascript - - { - "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", - "keyName": "key-name-csfle" - } - - For "gcp": - - .. code:: javascript - - { - "projectId": "devprod-drivers", - "location": "global", - "keyRing": "key-ring-csfle", - "keyName": "key-name-csfle" - } - - For "kmip": - - .. code:: javascript - - {} - - For "local", do not set a masterKey document. - - Expect a BSON binary with subtype 4 to be returned, referred to as ``datakey_id``. - - Use ``client`` to run a ``find`` on ``keyvault.datakeys`` by querying with the ``_id`` set to the ``datakey_id``. - - Expect that exactly one document is returned with the "masterKey.provider" equal to ``provider_name``. - - Check that ``client`` captured a command_started event for the ``insert`` command containing a majority writeConcern. - -#. 
Call ``client_encryption.encrypt()`` with the value "hello ", the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the ``key_id`` of ``datakey_id``. - - - Expect the return value to be a BSON binary subtype 6, referred to as ``encrypted``. - - Use ``client_encrypted`` to insert ``{ _id: "", "value": }`` into ``db.coll``. - - Use ``client_encrypted`` to run a find querying with ``_id`` of "" and expect ``value`` to be "hello ". - -#. Call ``client_encryption.encrypt()`` with the value "hello ", the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the ``key_alt_name`` of ``_altname``. - - - Expect the return value to be a BSON binary subtype 6. Expect the value to exactly match the value of ``encrypted``. - -#. Test explicit encrypting an auto encrypted field. - - - Use ``client_encrypted`` to attempt to insert ``{ "encrypted_placeholder": }`` - - Expect an exception to be thrown, since this is an attempt to auto encrypt an already encrypted value. - - - -External Key Vault Test -~~~~~~~~~~~~~~~~~~~~~~~ - -Run the following tests twice, parameterized by a boolean ``withExternalKeyVault``. - -#. Create a MongoClient without encryption enabled (referred to as ``client``). - -#. Using ``client``, drop the collections ``keyvault.datakeys`` and ``db.coll``. - Insert the document `external/external-key.json <../external/external-key.json>`_ into ``keyvault.datakeys``. - -#. Create the following: - - - A MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - A ``ClientEncryption`` object (referred to as ``client_encryption``) - - Configure both objects with the ``local`` KMS providers as follows: - - .. code:: javascript - - { "local": { "key": } } - - Configure both objects with ``keyVaultNamespace`` set to ``keyvault.datakeys``. - - Configure ``client_encrypted`` to use the schema `external/external-schema.json <../external/external-schema.json>`_ for ``db.coll`` by setting a schema map like: ``{ "db.coll": }`` - - If ``withExternalKeyVault == true``, configure both objects with an external key vault client. The external client MUST connect to the same - MongoDB cluster that is being tested against, except it MUST use the username ``fake-user`` and password ``fake-pwd``. - -#. Use ``client_encrypted`` to insert the document ``{"encrypted": "test"}`` into ``db.coll``. - If ``withExternalKeyVault == true``, expect an authentication exception to be thrown. Otherwise, expect the insert to succeed. - -#. Use ``client_encryption`` to explicitly encrypt the string ``"test"`` with key ID ``LOCALAAAAAAAAAAAAAAAAA==`` and deterministic algorithm. - If ``withExternalKeyVault == true``, expect an authentication exception to be thrown. Otherwise, expect the insert to succeed. - - -BSON size limits and batch splitting -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -First, perform the setup. - -#. Create a MongoClient without encryption enabled (referred to as ``client``). - -#. Using ``client``, drop and create the collection ``db.coll`` configured with the included JSON schema `limits/limits-schema.json <../limits/limits-schema.json>`_. - -#. Using ``client``, drop the collection ``keyvault.datakeys``. Insert the document `limits/limits-key.json <../limits/limits-key.json>`_ - -#. Create a MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - Configure with the ``local`` KMS provider as follows: - - .. code:: javascript - - { "local": { "key": } } - - Configure with the ``keyVaultNamespace`` set to ``keyvault.datakeys``. 
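For illustration, this ``client_encrypted`` setup could be expressed with the Node.js driver's ``autoEncryption`` option roughly as follows; the connection string and the way the 96-byte local key is loaded are placeholders for whatever the test environment provides:

.. code:: typescript

   import { MongoClient } from 'mongodb';

   // localMasterKey: the 96-byte local key, e.g. Buffer.from(LOCAL_MASTERKEY, 'base64')
   function createEncryptedClient(uri: string, localMasterKey: Buffer): MongoClient {
     return new MongoClient(uri, {
       autoEncryption: {
         keyVaultNamespace: 'keyvault.datakeys',
         kmsProviders: {
           local: { key: localMasterKey }
         }
       }
     });
   }

Automatic encryption additionally requires mongocryptd or the csfle shared library to be available, as discussed in the csfle notes above.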
- -Using ``client_encrypted`` perform the following operations: - -#. Insert ``{ "_id": "over_2mib_under_16mib", "unencrypted": }``. - - Expect this to succeed since this is still under the ``maxBsonObjectSize`` limit. - -#. Insert the document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_2mib", "unencrypted": < the string "a" repeated (2097152 - 2000) times > }`` - Note: limits-doc.json is a 1005 byte BSON document that encrypts to a ~10,000 byte document. - - Expect this to succeed since after encryption this still is below the normal maximum BSON document size. - Note, before auto encryption this document is under the 2 MiB limit. After encryption it exceeds the 2 MiB limit, but does NOT exceed the 16 MiB limit. - -#. Bulk insert the following: - - - ``{ "_id": "over_2mib_1", "unencrypted": }`` - - - ``{ "_id": "over_2mib_2", "unencrypted": }`` - - Expect the bulk write to succeed and split after first doc (i.e. two inserts occur). This may be verified using `command monitoring `_. - -#. Bulk insert the following: - - - The document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_2mib_1", "unencrypted": < the string "a" repeated (2097152 - 2000) times > }`` - - - The document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_2mib_2", "unencrypted": < the string "a" repeated (2097152 - 2000) times > }`` - - Expect the bulk write to succeed and split after first doc (i.e. two inserts occur). This may be verified using `command monitoring `_. - -#. Insert ``{ "_id": "under_16mib", "unencrypted": ``. - - Expect this to succeed since this is still (just) under the ``maxBsonObjectSize`` limit. - -#. Insert the document `limits/limits-doc.json <../limits/limits-doc.json>`_ concatenated with ``{ "_id": "encryption_exceeds_16mib", "unencrypted": < the string "a" repeated (16777216 - 2000) times > }`` - - Expect this to fail since encryption results in a document exceeding the ``maxBsonObjectSize`` limit. - -Optionally, if it is possible to mock the maxWriteBatchSize (i.e. the maximum number of documents in a batch) test that setting maxWriteBatchSize=1 and inserting the two documents ``{ "_id": "a" }, { "_id": "b" }`` with ``client_encrypted`` splits the operation into two inserts. - - -Views are prohibited -~~~~~~~~~~~~~~~~~~~~ - -#. Create a MongoClient without encryption enabled (referred to as ``client``). - -#. Using ``client``, drop and create a view named ``db.view`` with an empty pipeline. E.g. using the command ``{ "create": "view", "viewOn": "coll" }``. - -#. Create a MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - Configure with the ``local`` KMS provider as follows: - - .. code:: javascript - - { "local": { "key": } } - - Configure with the ``keyVaultNamespace`` set to ``keyvault.datakeys``. - -#. Using ``client_encrypted``, attempt to insert a document into ``db.view``. Expect an exception to be thrown containing the message: "cannot auto encrypt a view". - - -Corpus Test -~~~~~~~~~~~ - -The corpus test exhaustively enumerates all ways to encrypt all BSON value types. Note, the test data includes BSON binary subtype 4 (or standard UUID), which MUST be decoded and encoded as subtype 4. Run the test as follows. - -1. Create a MongoClient without encryption enabled (referred to as ``client``). - -2. 
Using ``client``, drop and create the collection ``db.coll`` configured with the included JSON schema `corpus/corpus-schema.json <../corpus/corpus-schema.json>`_. - -3. Using ``client``, drop the collection ``keyvault.datakeys``. Insert the documents `corpus/corpus-key-local.json <../corpus/corpus-key-local.json>`_, `corpus/corpus-key-aws.json <../corpus/corpus-key-aws.json>`_, `corpus/corpus-key-azure.json <../corpus/corpus-key-azure.json>`_, `corpus/corpus-key-gcp.json <../corpus/corpus-key-gcp.json>`_, and `corpus/corpus-key-kmip.json <../corpus/corpus-key-kmip.json>`_. - -4. Create the following: - - - A MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - A ``ClientEncryption`` object (referred to as ``client_encryption``) - - Configure both objects with ``aws``, ``azure``, ``gcp``, ``local``, and ``kmip`` KMS providers as follows: - - .. code:: javascript - - { - "aws": { }, - "azure": { }, - "gcp": { }, - "local": { "key": }, - "kmip": { "endpoint": "localhost:5698" } } - } - - Configure KMIP TLS connections to use the following options: - - - ``tlsCAFile`` (or equivalent) set to `drivers-evergreen-tools/.evergreen/x509gen/ca.pem `_. This MAY be configured system-wide. - - ``tlsCertificateKeyFile`` (or equivalent) set to `drivers-evergreen-tools/.evergreen/x509gen/client.pem `_. - - The method of passing TLS options for KMIP TLS connections is driver dependent. - - Where LOCAL_MASTERKEY is the following base64: - - .. code:: javascript - - Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk - - Configure both objects with ``keyVaultNamespace`` set to ``keyvault.datakeys``. - -5. Load `corpus/corpus.json <../corpus/corpus.json>`_ to a variable named ``corpus``. The corpus contains subdocuments with the following fields: - - - ``kms`` is ``aws``, ``azure``, ``gcp``, ``local``, or ``kmip`` - - ``type`` is a BSON type string `names coming from here `_) - - ``algo`` is either ``rand`` or ``det`` for random or deterministic encryption - - ``method`` is either ``auto``, for automatic encryption or ``explicit`` for explicit encryption - - ``identifier`` is either ``id`` or ``altname`` for the key identifier - - ``allowed`` is a boolean indicating whether the encryption for the given parameters is permitted. - - ``value`` is the value to be tested. - - Create a new BSON document, named ``corpus_copied``. - Iterate over each field of ``corpus``. - - - If the field name is ``_id``, ``altname_aws``, ``altname_local``, ``altname_azure``, ``altname_gcp``, or ``altname_kmip`` copy the field to ``corpus_copied``. - - If ``method`` is ``auto``, copy the field to ``corpus_copied``. - - If ``method`` is ``explicit``, use ``client_encryption`` to explicitly encrypt the value. - - - Encrypt with the algorithm described by ``algo``. - - If ``identifier`` is ``id`` - - - If ``kms`` is ``local`` set the key_id to the UUID with base64 value ``LOCALAAAAAAAAAAAAAAAAA==``. - - If ``kms`` is ``aws`` set the key_id to the UUID with base64 value ``AWSAAAAAAAAAAAAAAAAAAA==``. - - If ``kms`` is ``azure`` set the key_id to the UUID with base64 value ``AZUREAAAAAAAAAAAAAAAAA==``. - - If ``kms`` is ``gcp`` set the key_id to the UUID with base64 value ``GCPAAAAAAAAAAAAAAAAAAA==``. - - If ``kms`` is ``kmip`` set the key_id to the UUID with base64 value ``KMIPAAAAAAAAAAAAAAAAAA==``. - - - If ``identifier`` is ``altname`` - - - If ``kms`` is ``local`` set the key_alt_name to "local". 
- - If ``kms`` is ``aws`` set the key_alt_name to "aws". - - If ``kms`` is ``azure`` set the key_alt_name to "azure". - - If ``kms`` is ``gcp`` set the key_alt_name to "gcp". - - If ``kms`` is ``kmip`` set the key_alt_name to "kmip". - - If ``allowed`` is true, copy the field and encrypted value to ``corpus_copied``. - If ``allowed`` is false, verify that an exception is thrown. Copy the unencrypted value to ``corpus_copied``. - - -6. Using ``client_encrypted``, insert ``corpus_copied`` into ``db.coll``. - -7. Using ``client_encrypted``, find the inserted document from ``db.coll`` to a variable named ``corpus_decrypted``. Since it should have been automatically decrypted, assert the document exactly matches ``corpus``. - -8. Load `corpus/corpus-encrypted.json <../corpus/corpus-encrypted.json>`_ to a variable named ``corpus_encrypted_expected``. - Using ``client`` find the inserted document from ``db.coll`` to a variable named ``corpus_encrypted_actual``. - - Iterate over each field of ``corpus_encrypted_expected`` and check the following: - - - If the ``algo`` is ``det``, that the value equals the value of the corresponding field in ``corpus_encrypted_actual``. - - If the ``algo`` is ``rand`` and ``allowed`` is true, that the value does not equal the value of the corresponding field in ``corpus_encrypted_actual``. - - If ``allowed`` is true, decrypt the value with ``client_encryption``. Decrypt the value of the corresponding field of ``corpus_encrypted`` and validate that they are both equal. - - If ``allowed`` is false, validate the value exactly equals the value of the corresponding field of ``corpus`` (neither was encrypted). - -9. Repeat steps 1-8 with a local JSON schema. I.e. amend step 4 to configure the schema on ``client_encrypted`` with the ``schema_map`` option. - -Custom Endpoint Test -~~~~~~~~~~~~~~~~~~~~ - -Setup -````` - -For each test case, start by creating two ``ClientEncryption`` objects. Recreate the ``ClientEncryption`` objects for each test case. - -Create a ``ClientEncryption`` object (referred to as ``client_encryption``) - -Configure with ``keyVaultNamespace`` set to ``keyvault.datakeys``, and a default MongoClient as the ``keyVaultClient``. - -Configure with KMS providers as follows: - -.. code:: javascript - - { - "aws": { - "accessKeyId": , - "secretAccessKey": - }, - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - "identityPlatformEndpoint": "login.microsoftonline.com:443" - }, - "gcp": { - "email": , - "privateKey": , - "endpoint": "oauth2.googleapis.com:443" - }, - "kmip": { - "endpoint": "localhost:5698" - } - } - -Create a ``ClientEncryption`` object (referred to as ``client_encryption_invalid``) - -Configure with ``keyVaultNamespace`` set to ``keyvault.datakeys``, and a default MongoClient as the ``keyVaultClient``. - -Configure with KMS providers as follows: - -.. code:: javascript - - { - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - "identityPlatformEndpoint": "doesnotexist.invalid:443" - }, - "gcp": { - "email": , - "privateKey": , - "endpoint": "doesnotexist.invalid:443" - }, - "kmip": { - "endpoint": "doesnotexist.local:5698" - } - } - -Configure KMIP TLS connections to use the following options: - -- ``tlsCAFile`` (or equivalent) set to `drivers-evergreen-tools/.evergreen/x509gen/ca.pem `_. This MAY be configured system-wide. -- ``tlsCertificateKeyFile`` (or equivalent) set to `drivers-evergreen-tools/.evergreen/x509gen/client.pem `_.
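As one illustration only (not mandated by the spec), recent versions of the Node.js driver accept these per-provider TLS settings through the ``tlsOptions`` field of the ``ClientEncryption`` options; the ``keyVaultClient`` connection string and the certificate paths below are assumptions for this sketch:

.. code:: typescript

    import { ClientEncryption, MongoClient } from 'mongodb';

    // Assumed local paths to the drivers-evergreen-tools certificates.
    const CA_FILE = '/path/to/drivers-evergreen-tools/.evergreen/x509gen/ca.pem';
    const CLIENT_KEY_FILE = '/path/to/drivers-evergreen-tools/.evergreen/x509gen/client.pem';

    const keyVaultClient = new MongoClient('mongodb://localhost:27017');
    const clientEncryption = new ClientEncryption(keyVaultClient, {
      keyVaultNamespace: 'keyvault.datakeys',
      kmsProviders: { kmip: { endpoint: 'localhost:5698' } },
      // TLS options are keyed by KMS provider name.
      tlsOptions: {
        kmip: { tlsCAFile: CA_FILE, tlsCertificateKeyFile: CLIENT_KEY_FILE }
      }
    });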
- -The method of passing TLS options for KMIP TLS connections is driver dependent. - -Test cases -`````````` - -1. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - -2. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "kms.us-east-1.amazonaws.com" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - -3. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "kms.us-east-1.amazonaws.com:443" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - -4. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "kms.us-east-1.amazonaws.com:12345" - } - - Expect this to fail with a socket connection error. - -5. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "kms.us-east-2.amazonaws.com" - } - - Expect this to fail with an exception. - -6. Call `client_encryption.createDataKey()` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "doesnotexist.invalid" - } - - Expect this to fail with a network exception indicating failure to resolve "doesnotexist.invalid". - -7. Call `client_encryption.createDataKey()` with "azure" as the provider and the following masterKey: - - .. code:: javascript - - { - "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", - "keyName": "key-name-csfle" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - - Call ``client_encryption_invalid.createDataKey()`` with the same masterKey. Expect this to fail with a network exception indicating failure to resolve "doesnotexist.invalid". - -8. Call `client_encryption.createDataKey()` with "gcp" as the provider and the following masterKey: - - .. code:: javascript - - { - "projectId": "devprod-drivers", - "location": "global", - "keyRing": "key-ring-csfle", - "keyName": "key-name-csfle", - "endpoint": "cloudkms.googleapis.com:443" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - - Call ``client_encryption_invalid.createDataKey()`` with the same masterKey. 
Expect this to fail with a network exception indicating failure to resolve "doesnotexist.invalid". - -9. Call `client_encryption.createDataKey()` with "gcp" as the provider and the following masterKey: - - .. code:: javascript - - { - "projectId": "devprod-drivers", - "location": "global", - "keyRing": "key-ring-csfle", - "keyName": "key-name-csfle", - "endpoint": "doesnotexist.invalid:443" - } - - Expect this to fail with an exception with a message containing the string: "Invalid KMS response". - -10. Call `client_encryption.createDataKey()` with "kmip" as the provider and the following masterKey: - - .. code:: javascript - - { - "keyId": "1" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - - Call ``client_encryption_invalid.createDataKey()`` with the same masterKey. Expect this to fail with a network exception indicating failure to resolve "doesnotexist.local". - -11. Call ``client_encryption.createDataKey()`` with "kmip" as the provider and the following masterKey: - - .. code:: javascript - - { - "keyId": "1", - "endpoint": "localhost:5698" - } - - Expect this to succeed. Use the returned UUID of the key to explicitly encrypt and decrypt the string "test" to validate it works. - -12. Call ``client_encryption.createDataKey()`` with "kmip" as the provider and the following masterKey: - - .. code:: javascript - - { - "keyId": "1", - "endpoint": "doesnotexist.local:5698" - } - - Expect this to fail with a network exception indicating failure to resolve "doesnotexist.local". - -Bypass spawning mongocryptd -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. note:: - - IMPORTANT: If csfle_ is visible to the operating system's library search - mechanism, the expected server error generated by these - ``mongocryptdBypassSpawn`` tests will not appear because libmongocrypt will - load the csfle library instead of consulting mongocryptd. For these tests, it - is required that libmongocrypt *not* load csfle. Refer to the - client-side-encryption document for more information on "disabling" csfle. - - -Via mongocryptdBypassSpawn -`````````````````````````` - -The following tests that setting ``mongocryptdBypassSpawn=true`` really does bypass spawning mongocryptd. - -#. Create a MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - Configure the required options. Use the ``local`` KMS provider as follows: - - .. code:: javascript - - { "local": { "key": } } - - Configure with the ``keyVaultNamespace`` set to ``keyvault.datakeys``. - - Configure ``client_encrypted`` to use the schema `external/external-schema.json <../external/external-schema.json>`_ for ``db.coll`` by setting a schema map like: ``{ "db.coll": }`` - - Configure the following ``extraOptions``: - - .. code:: javascript - - { - "mongocryptdBypassSpawn": true - "mongocryptdURI": "mongodb://localhost:27021/db?serverSelectionTimeoutMS=1000", - "mongocryptdSpawnArgs": [ "--pidfilepath=bypass-spawning-mongocryptd.pid", "--port=27021"] - } - - Drivers MAY pass a different port if they expect their testing infrastructure to be using port 27021. Pass a port that should be free. - -#. Use ``client_encrypted`` to insert the document ``{"encrypted": "test"}`` into ``db.coll``. Expect a server selection error propagated from the internal MongoClient failing to connect to mongocryptd on port 27021. 
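Purely as an illustration (not part of the spec), the following sketch shows how the two steps above might look with the Node.js driver. The connection strings, the 96-byte ``LOCAL_MASTERKEY`` buffer, and the inline ``externalSchema`` stand-in for external/external-schema.json are assumptions, and the exact error class that surfaces the server selection failure may differ between driver versions:

.. code:: typescript

    import { MongoClient, MongoServerSelectionError } from 'mongodb';

    // Assumed stand-ins for the spec's local master key and external/external-schema.json.
    const LOCAL_MASTERKEY = Buffer.alloc(96);
    const externalSchema = { bsonType: 'object' };

    async function bypassSpawnTest(): Promise<void> {
      const clientEncrypted = new MongoClient('mongodb://localhost:27017', {
        autoEncryption: {
          keyVaultNamespace: 'keyvault.datakeys',
          kmsProviders: { local: { key: LOCAL_MASTERKEY } },
          schemaMap: { 'db.coll': externalSchema },
          extraOptions: {
            mongocryptdBypassSpawn: true,
            mongocryptdURI: 'mongodb://localhost:27021/db?serverSelectionTimeoutMS=1000',
            mongocryptdSpawnArgs: ['--pidfilepath=bypass-spawning-mongocryptd.pid', '--port=27021']
          }
        }
      });
      try {
        // mongocryptd was never spawned, so this insert should fail server selection
        // against the internal mongocryptd client on port 27021.
        await clientEncrypted.db('db').collection('coll').insertOne({ encrypted: 'test' });
        throw new Error('expected the insert to fail with a server selection error');
      } catch (error) {
        if (!(error instanceof MongoServerSelectionError)) throw error;
      } finally {
        await clientEncrypted.close();
      }
    }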
- -Via bypassAutoEncryption -```````````````````````` - -The following tests that setting ``bypassAutoEncryption=true`` really does bypass spawning mongocryptd. - -#. Create a MongoClient configured with auto encryption (referred to as ``client_encrypted``) - - Configure the required options. Use the ``local`` KMS provider as follows: - - .. code:: javascript - - { "local": { "key": } } - - Configure with the ``keyVaultNamespace`` set to ``keyvault.datakeys``. - - Configure with ``bypassAutoEncryption=true``. - - Configure the following ``extraOptions``: - - .. code:: javascript - - { - "mongocryptdSpawnArgs": [ "--pidfilepath=bypass-spawning-mongocryptd.pid", "--port=27021"] - } - - Drivers MAY pass a different value to ``--port`` if they expect their testing infrastructure to be using port 27021. Pass a port that should be free. - -#. Use ``client_encrypted`` to insert the document ``{"unencrypted": "test"}`` into ``db.coll``. Expect this to succeed. - -#. Validate that mongocryptd was not spawned. Create a MongoClient to localhost:27021 (or whatever was passed via ``--port``) with serverSelectionTimeoutMS=1000. Run a handshake command and ensure it fails with a server selection timeout. - -Deadlock tests -~~~~~~~~~~~~~~ - -.. _Connection Monitoring and Pooling: /source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.rst - -The following tests only apply to drivers that have implemented a connection pool (see the `Connection Monitoring and Pooling`_ specification). - -There are multiple parameterized test cases. Before each test case, perform the setup. - -Setup -````` - -Create a ``MongoClient`` for setup operations named ``client_test``. - -Create a ``MongoClient`` for key vault operations with ``maxPoolSize=1`` named ``client_keyvault``. Capture command started events. - -Using ``client_test``, drop the collections ``keyvault.datakeys`` and ``db.coll``. - -Insert the document `external/external-key.json <../external/external-key.json>`_ into ``keyvault.datakeys`` with majority write concern. - -Create a collection ``db.coll`` configured with a JSON schema `external/external-schema.json <../external/external-schema.json>`_ as the validator, like so: - -.. code:: typescript - - {"create": "coll", "validator": {"$jsonSchema": }} - -Create a ``ClientEncryption`` object, named ``client_encryption`` configured with: -- ``keyVaultClient``=``client_test`` -- ``keyVaultNamespace``="keyvault.datakeys" -- ``kmsProviders``=``{ "local": { "key": } }`` - -Use ``client_encryption`` to encrypt the value "string0" with ``algorithm``="AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" and ``keyAltName``="local". Store the result in a variable named ``ciphertext``. - -Proceed to run the test case. - -Each test case configures a ``MongoClient`` with automatic encryption (named ``client_encrypted``). - -Each test must assert the number of unique ``MongoClient``s created. This can be accomplished by capturing ``TopologyOpeningEvent``, or by checking command started events for a client identifier (not possible in all drivers). - -Running a test case -``````````````````` -- Create a ``MongoClient`` named ``client_encrypted`` configured as follows: - - Set ``AutoEncryptionOpts``: - - ``keyVaultNamespace="keyvault.datakeys"`` - - ``kmsProviders``=``{ "local": { "key": } }`` - - Append ``TestCase.AutoEncryptionOpts`` (defined below) - - Capture command started events. 
- - Set ``maxPoolSize=TestCase.MaxPoolSize`` -- If the testcase sets ``AutoEncryptionOpts.bypassAutoEncryption=true``: - - Use ``client_test`` to insert ``{ "_id": 0, "encrypted": }`` into ``db.coll``. -- Otherwise: - - Use ``client_encrypted`` to insert ``{ "_id": 0, "encrypted": "string0" }``. -- Use ``client_encrypted`` to run a ``findOne`` operation on ``db.coll``, with the filter ``{ "_id": 0 }``. -- Expect the result to be ``{ "_id": 0, "encrypted": "string0" }``. -- Check captured events against ``TestCase.Expectations``. -- Check the number of unique ``MongoClient``s created is equal to ``TestCase.ExpectedNumberOfClients``. - -Case 1 -`````` -- MaxPoolSize: 1 -- AutoEncryptionOpts: - - bypassAutoEncryption=false - - keyVaultClient=unset -- Expectations: - - Expect ``client_encrypted`` to have captured four ``CommandStartedEvent``: - - a listCollections to "db". - - a find on "keyvault". - - an insert on "db". - - a find on "db" -- ExpectedNumberOfClients: 2 - -Case 2 -`````` -- MaxPoolSize: 1 -- AutoEncryptionOpts: - - bypassAutoEncryption=false - - keyVaultClient=client_keyvault -- Expectations: - - Expect ``client_encrypted`` to have captured three ``CommandStartedEvent``: - - a listCollections to "db". - - an insert on "db". - - a find on "db" - - Expect ``client_keyvault`` to have captured one ``CommandStartedEvent``: - - a find on "keyvault". -- ExpectedNumberOfClients: 2 - -Case 3 -`````` -- MaxPoolSize: 1 -- AutoEncryptionOpts: - - bypassAutoEncryption=true - - keyVaultClient=unset -- Expectations: - - Expect ``client_encrypted`` to have captured three ``CommandStartedEvent``: - - a find on "db" - - a find on "keyvault". -- ExpectedNumberOfClients: 2 - -Case 4 -`````` -- MaxPoolSize: 1 -- AutoEncryptionOpts: - - bypassAutoEncryption=true - - keyVaultClient=client_keyvault -- Expectations: - - Expect ``client_encrypted`` to have captured two ``CommandStartedEvent``: - - a find on "db" - - Expect ``client_keyvault`` to have captured one ``CommandStartedEvent``: - - a find on "keyvault". -- ExpectedNumberOfClients: 1 - -Case 5 -`````` -Drivers that do not support an unlimited maximum pool size MUST skip this test. - -- MaxPoolSize: 0 -- AutoEncryptionOpts: - - bypassAutoEncryption=false - - keyVaultClient=unset -- Expectations: - - Expect ``client_encrypted`` to have captured five ``CommandStartedEvent``: - - a listCollections to "db". - - a listCollections to "keyvault". - - a find on "keyvault". - - an insert on "db". - - a find on "db" -- ExpectedNumberOfClients: 1 - -Case 6 -`````` -Drivers that do not support an unlimited maximum pool size MUST skip this test. - -- MaxPoolSize: 0 -- AutoEncryptionOpts: - - bypassAutoEncryption=false - - keyVaultClient=client_keyvault -- Expectations: - - Expect ``client_encrypted`` to have captured three ``CommandStartedEvent``: - - a listCollections to "db". - - an insert on "db". - - a find on "db" - - Expect ``client_keyvault`` to have captured one ``CommandStartedEvent``: - - a find on "keyvault". -- ExpectedNumberOfClients: 1 - -Case 7 -`````` -Drivers that do not support an unlimited maximum pool size MUST skip this test. - -- MaxPoolSize: 0 -- AutoEncryptionOpts: - - bypassAutoEncryption=true - - keyVaultClient=unset -- Expectations: - - Expect ``client_encrypted`` to have captured three ``CommandStartedEvent``: - - a find on "db" - - a find on "keyvault". -- ExpectedNumberOfClients: 1 - -Case 8 -`````` -Drivers that do not support an unlimited maximum pool size MUST skip this test. 
- -- MaxPoolSize: 0 -- AutoEncryptionOpts: - - bypassAutoEncryption=true - - keyVaultClient=client_keyvault -- Expectations: - - Expect ``client_encrypted`` to have captured two ``CommandStartedEvent``: - - a find on "db" - - Expect ``client_keyvault`` to have captured one ``CommandStartedEvent``: - - a find on "keyvault". -- ExpectedNumberOfClients: 1 - -KMS TLS Tests -~~~~~~~~~~~~~ - -.. _ca.pem: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/x509gen/ca.pem -.. _expired.pem: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/x509gen/expired.pem -.. _wrong-host.pem: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/x509gen/wrong-host.pem -.. _server.pem: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/x509gen/server.pem -.. _client.pem: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/x509gen/client.pem - -The following tests check that connections to KMS servers with TLS verify peer certificates. - -The two tests below make use of mock KMS servers which can be run on Evergreen using `the mock KMS server script `_. -Drivers can set up their local Python environment for the mock KMS server by running `the virtualenv activation script `_. - -To start two mock KMS servers, one on port 9000 with `ca.pem`_ as a CA file and `expired.pem`_ as a cert file, and one on port 9001 with `ca.pem`_ as a CA file and `wrong-host.pem`_ as a cert file, -run the following commands from the ``.evergreen/csfle`` directory: - -.. code:: - - . ./activate-kmstlsvenv.sh - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 9000 & - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 9001 & - -Setup -````` - -For both tests, do the following: - -#. Start a ``mongod`` process with **server version 4.1.9 or later**. - -#. Create a ``MongoClient`` for key vault operations. - -#. Create a ``ClientEncryption`` object (referred to as ``client_encryption``) with ``keyVaultNamespace`` set to ``keyvault.datakeys``. - -Invalid KMS Certificate -``````````````````````` - -#. Start a mock KMS server on port 9000 with `ca.pem`_ as a CA file and `expired.pem`_ as a cert file. - -#. Call ``client_encryption.createDataKey()`` with "aws" as the provider and the following masterKey: - - .. code:: javascript - - { - "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "127.0.0.1:9000", - } - - Expect this to fail with an exception with a message referencing an expired certificate. This message will be language dependent. - In Python, this message is "certificate verify failed: certificate has expired". In Go, this message is - "certificate has expired or is not yet valid". If the language of implementation has a single, generic error message for - all certificate validation errors, drivers may inspect other fields of the error to verify its meaning. - -Invalid Hostname in KMS Certificate -``````````````````````````````````` - -#. Start a mock KMS server on port 9001 with `ca.pem`_ as a CA file and `wrong-host.pem`_ as a cert file. - -#. Call ``client_encryption.createDataKey()`` with "aws" as the provider and the following masterKey: - - ..
code:: javascript - - { - "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "127.0.0.1:9001", - } - - Expect this to fail with an exception with a message referencing an incorrect or unexpected host. This message will be language dependent. - In Python, this message is "certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'". In Go, this message - is "cannot validate certificate for 127.0.0.1 because it doesn't contain any IP SANs". If the language of implementation has a single, generic - error message for all certificate validation errors, drivers may inspect other fields of the error to verify its meaning. - -KMS TLS Options Tests -~~~~~~~~~~~~~~~~~~~~~ - -Setup -````` - -Start a ``mongod`` process with **server version 4.1.9 or later**. - -Four mock KMS server processes must be running: - -1. The mock `KMS HTTP server `_. - - Run on port 9000 with `ca.pem`_ as a CA file and `expired.pem`_ as a cert file. - - Example: - - .. code:: - - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 9000 - -2. The mock `KMS HTTP server `_. - - Run on port 9001 with `ca.pem`_ as a CA file and `wrong-host.pem`_ as a cert file. - - Example: - - .. code:: - - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 9001 - -3. The mock `KMS HTTP server `_. - - Run on port 9002 with `ca.pem`_ as a CA file and `server.pem`_ as a cert file. - - Run with the ``--require_client_cert`` option. - - Example: - - .. code:: - - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 9002 --require_client_cert - - -4. The mock `KMS KMIP server `_. - -Create the following four ``ClientEncryption`` objects. - -Configure each with ``keyVaultNamespace`` set to ``keyvault.datakeys``, and a default MongoClient as the ``keyVaultClient``. - -1. Create a ``ClientEncryption`` object named ``client_encryption_no_client_cert`` with the following KMS providers: - - .. code:: javascript - - { - "aws": { - "accessKeyId": , - "secretAccessKey": - }, - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - "identityPlatformEndpoint": "127.0.0.1:9002" - }, - "gcp": { - "email": , - "privateKey": , - "endpoint": "127.0.0.1:9002" - }, - "kmip" { - "endpoint": "127.0.0.1:5698" - } - } - - Add TLS options for the ``aws``, ``azure``, ``gcp``, and - ``kmip`` providers to use the following options: - - - ``tlsCAFile`` (or equivalent) set to `ca.pem`_. This MAY be configured system-wide. - -2. Create a ``ClientEncryption`` object named ``client_encryption_with_tls`` with the following KMS providers: - - .. code:: javascript - - { - "aws": { - "accessKeyId": , - "secretAccessKey": - }, - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - "identityPlatformEndpoint": "127.0.0.1:9002" - }, - "gcp": { - "email": , - "privateKey": , - "endpoint": "127.0.0.1:9002" - }, - "kmip" { - "endpoint": "127.0.0.1:5698" - } - } - - Add TLS options for the ``aws``, ``azure``, ``gcp``, and - ``kmip`` providers to use the following options: - - - ``tlsCAFile`` (or equivalent) set to `ca.pem`_. This MAY be configured system-wide. - - ``tlsCertificateKeyFile`` (or equivalent) set to `client.pem`_ - -3. Create a ``ClientEncryption`` object named ``client_encryption_expired`` with the following KMS providers: - - .. 
code:: javascript - - { - "aws": { - "accessKeyId": , - "secretAccessKey": - }, - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - "identityPlatformEndpoint": "127.0.0.1:9000" - }, - "gcp": { - "email": , - "privateKey": , - "endpoint": "127.0.0.1:9000" - }, - "kmip": { - "endpoint": "127.0.0.1:9000" - } - } - - Add TLS options for the ``aws``, ``azure``, ``gcp``, and - ``kmip`` providers to use the following options: - - - ``tlsCAFile`` (or equivalent) set to `ca.pem`_. This MAY be configured system-wide. - -4. Create a ``ClientEncryption`` object named ``client_encryption_invalid_hostname`` with the following KMS providers: - - .. code:: javascript - - { - "aws": { - "accessKeyId": , - "secretAccessKey": - }, - "azure": { - "tenantId": , - "clientId": , - "clientSecret": , - "identityPlatformEndpoint": "127.0.0.1:9001" - }, - "gcp": { - "email": , - "privateKey": , - "endpoint": "127.0.0.1:9001" - }, - "kmip": { - "endpoint": "127.0.0.1:9001" - } - } - - Add TLS options for the ``aws``, ``azure``, ``gcp``, and - ``kmip`` providers to use the following options: - - - ``tlsCAFile`` (or equivalent) set to `ca.pem`_. This MAY be configured system-wide. - -Case 1: AWS -``````````` - -Call `client_encryption_no_client_cert.createDataKey()` with "aws" as the provider and the -following masterKey: - -.. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "127.0.0.1:9002" - } - -Expect an error indicating TLS handshake failed. - -Call `client_encryption_with_tls.createDataKey()` with "aws" as the provider and the -following masterKey: - -.. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "127.0.0.1:9002" - } - -Expect an error from libmongocrypt with a message containing the string: "parse -error". This implies TLS handshake succeeded. - -Call `client_encryption_expired.createDataKey()` with "aws" as the provider and the -following masterKey: - -.. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "127.0.0.1:9000" - } - -Expect an error indicating TLS handshake failed due to an expired certificate. - -Call `client_encryption_invalid_hostname.createDataKey()` with "aws" as the provider and the -following masterKey: - -.. code:: javascript - - { - region: "us-east-1", - key: "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - endpoint: "127.0.0.1:9001" - } - -Expect an error indicating TLS handshake failed due to an invalid hostname. - -Case 2: Azure -````````````` - -Call `client_encryption_no_client_cert.createDataKey()` with "azure" as the provider and the -following masterKey: - -.. code:: javascript - - { 'keyVaultEndpoint': 'doesnotexist.local', 'keyName': 'foo' } - -Expect an error indicating TLS handshake failed. - -Call `client_encryption_with_tls.createDataKey()` with "azure" as the provider -and the same masterKey. - -Expect an error from libmongocrypt with a message containing the string: "HTTP -status=404". This implies TLS handshake succeeded. - -Call `client_encryption_expired.createDataKey()` with "azure" as the provider and -the same masterKey. - -Expect an error indicating TLS handshake failed due to an expired certificate. - -Call `client_encryption_invalid_hostname.createDataKey()` with "azure" as the provider and -the same masterKey.
- -Expect an error indicating TLS handshake failed due to an invalid hostname. - -Case 3: GCP -``````````` - -Call `client_encryption_no_client_cert.createDataKey()` with "gcp" as the provider and the -following masterKey: - -.. code:: javascript - - { 'projectId': 'foo', 'location': 'bar', 'keyRing': 'baz', 'keyName': 'foo' } - -Expect an error indicating TLS handshake failed. - -Call `client_encryption_with_tls.createDataKey()` with "gcp" as the provider and -the same masterKey. - -Expect an error from libmongocrypt with a message containing the string: "HTTP -status=404". This implies TLS handshake succeeded. - -Call `client_encryption_expired.createDataKey()` with "gcp" as the provider and -the same masterKey. - -Expect an error indicating TLS handshake failed due to an expired certificate. - -Call `client_encryption_invalid_hostname.createDataKey()` with "gcp" as the provider and -the same masterKey. - -Expect an error indicating TLS handshake failed due to an invalid hostname. - -Case 4: KMIP -```````````` - -Call `client_encryption_no_client_cert.createDataKey()` with "kmip" as the provider and the -following masterKey: - -.. code:: javascript - - { } - -Expect an error indicating TLS handshake failed. - -Call `client_encryption_with_tls.createDataKey()` with "kmip" as the provider -and the same masterKey. - -Expect success. - -Call `client_encryption_expired.createDataKey()` with "kmip" as the provider and -the same masterKey. - -Expect an error indicating TLS handshake failed due to an expired certificate. - -Call `client_encryption_invalid_hostname.createDataKey()` with "kmip" as the provider and -the same masterKey. - -Expect an error indicating TLS handshake failed due to an invalid hostname. - -Explicit Encryption -~~~~~~~~~~~~~~~~~~~ - -The Explicit Encryption tests require MongoDB server 6.0+. The tests must not run against a standalone. - -Before running each of the following test cases, perform the following Test Setup. - -Test Setup -`````````` - -Load the file `encryptedFields.json `_ as ``encryptedFields``. - -Load the file `key1-document.json `_ as ``key1Document``. - -Read the ``"_id"`` field of ``key1Document`` as ``key1ID``. - -Drop and create the collection ``db.explicit_encryption`` using ``encryptedFields`` as an option. See `FLE 2 CreateCollection() and Collection.Drop() `_. - -Drop and create the collection ``keyvault.datakeys``. - -Create a MongoClient named ``keyVaultClient``. - -Create a ClientEncryption object named ``clientEncryption`` with these options: - -.. code:: typescript - - ClientEncryptionOpts { - keyVaultClient: ; - keyVaultNamespace: "keyvault.datakeys"; - kmsProviders: { "local": { "key": } } - } - -Create a MongoClient named ``encryptedClient`` with these ``AutoEncryptionOpts``: - -.. code:: typescript - - AutoEncryptionOpts { - keyVaultNamespace: "keyvault.datakeys"; - kmsProviders: { "local": { "key": } } - bypassQueryAnalysis: true - } - - -Case 1: can insert encrypted indexed and find -````````````````````````````````````````````` - -Use ``clientEncryption`` to encrypt the value "encrypted indexed value" with these ``EncryptOpts``: - -.. code:: typescript - - class EncryptOpts { - keyId : - algorithm: "Indexed", - } - -Store the result in ``insertPayload``. - -Use ``encryptedClient`` to insert the document ``{ "encryptedIndexed": }`` into ``db.explicit_encryption``. - -Use ``clientEncryption`` to encrypt the value "encrypted indexed value" with these ``EncryptOpts``: - -.. 
code:: typescript - - class EncryptOpts { - keyId : - algorithm: "Indexed", - queryType: Equality - } - -Store the result in ``findPayload``. - -Use ``encryptedClient`` to run a "find" operation on the ``db.explicit_encryption`` collection with the filter ``{ "encryptedIndexed": }``. - -Assert one document is returned containing the field ``{ "encryptedIndexed": "encrypted indexed value" }``. - -Case 2: can insert encrypted indexed and find with non-zero contention -``````````````````````````````````````````````````````````````````````` - -Use ``clientEncryption`` to encrypt the value "encrypted indexed value" with these ``EncryptOpts``: - -.. code:: typescript - - class EncryptOpts { - keyId : - algorithm: "Indexed", - contentionFactor: 10 - } - -Store the result in ``insertPayload``. - -Use ``encryptedClient`` to insert the document ``{ "encryptedIndexed": }`` into ``db.explicit_encryption``. - -Repeat the above steps 10 times to insert 10 total documents. The ``insertPayload`` must be regenerated each iteration. - -Use ``clientEncryption`` to encrypt the value "encrypted indexed value" with these ``EncryptOpts``: - -.. code:: typescript - - class EncryptOpts { - keyId : - algorithm: "Indexed", - queryType: Equality - } - -Store the result in ``findPayload``. - -Use ``encryptedClient`` to run a "find" operation on the ``db.explicit_encryption`` collection with the filter ``{ "encryptedIndexed": }``. - -Assert less than 10 documents are returned. 0 documents may be returned. Assert each returned document contains the field ``{ "encryptedIndexed": "encrypted indexed value" }``. - -Use ``clientEncryption`` to encrypt the value "encrypted indexed value" with these ``EncryptOpts``: - -.. code:: typescript - - class EncryptOpts { - keyId : - algorithm: "Indexed", - queryType: Equality, - contentionFactor: 10 - } - -Store the result in ``findPayload2``. - -Use ``encryptedClient`` to run a "find" operation on the ``db.explicit_encryption`` collection with the filter ``{ "encryptedIndexed": }``. - -Assert 10 documents are returned. Assert each returned document contains the field ``{ "encryptedIndexed": "encrypted indexed value" }``. - -Case 3: can insert encrypted unindexed -`````````````````````````````````````` - -Use ``clientEncryption`` to encrypt the value "encrypted unindexed value" with these ``EncryptOpts``: - -.. code:: typescript - - class EncryptOpts { - keyId : - algorithm: "Unindexed" - } - -Store the result in ``insertPayload``. - -Use ``encryptedClient`` to insert the document ``{ "_id": 1, "encryptedUnindexed": }`` into ``db.explicit_encryption``. - -Use ``encryptedClient`` to run a "find" operation on the ``db.explicit_encryption`` collection with the filter ``{ "_id": 1 }``. - -Assert one document is returned containing the field ``{ "encryptedUnindexed": "encrypted unindexed value" }``. - -Case 4: can roundtrip encrypted indexed -``````````````````````````````````````` - -Use ``clientEncryption`` to encrypt the value "encrypted indexed value" with these ``EncryptOpts``: - -.. code:: typescript - - class EncryptOpts { - keyId : - algorithm: "Indexed", - } - -Store the result in ``payload``. - -Use ``clientEncryption`` to decrypt ``payload``. Assert the returned value equals "encrypted indexed value". - -Case 5: can roundtrip encrypted unindexed -````````````````````````````````````````` - -Use ``clientEncryption`` to encrypt the value "encrypted unindexed value" with these ``EncryptOpts``: - -.. 
code:: typescript - - class EncryptOpts { - keyId : - algorithm: "Unindexed", - } - -Store the result in ``payload``. - -Use ``clientEncryption`` to decrypt ``payload``. Assert the returned value equals "encrypted unindexed value". \ No newline at end of file diff --git a/test/spec/client-side-operations-timeout/README.rst b/test/spec/client-side-operations-timeout/README.rst deleted file mode 100644 index 8a6bba61dac..00000000000 --- a/test/spec/client-side-operations-timeout/README.rst +++ /dev/null @@ -1,616 +0,0 @@ -====================================== -Client Side Operations Timeouts Tests -====================================== - -.. contents:: - ----- - -Introduction -============ - -This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests -are broken up into automated YAML/JSON tests and additional prose tests. - -Spec Tests -========== - -This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test -Runner" specification. Because the tests introduced in this specification are timing-based, there is a risk that some -of them may intermittently fail without any bugs being present in the driver. As a mitigation, drivers MAY execute -these tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and -another with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the ``single-node-auth.json`` -and ``single-node-auth-ssl.json`` files in the ``drivers-evergreen-tools`` repository to create these clusters. - -Prose Tests -=========== - -There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST -create a MongoClient without the ``timeoutMS`` option set (referred to as ``internalClient``). Any fail points set -during a test MUST be unset using ``internalClient`` after the test has been executed. All MongoClient instances -created for tests MUST be configured with read/write concern ``majority``, read preference ``primary``, and command -monitoring enabled to listen for ``command_started`` events. - -1. Multi-batch writes -~~~~~~~~~~~~~~~~~~~~~ - -This test MUST only run against standalones on server versions 4.4 and higher. -The ``insertMany`` call takes an exceedingly long time on replicasets and sharded -clusters. Drivers MAY adjust the timeouts used in this test to allow for differing -bulk encoding performance. - -#. Using ``internalClient``, drop the ``db.coll`` collection. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { - times: 2 - }, - data: { - failCommands: ["insert"], - blockConnection: true, - blockTimeMS: 1010 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=2000``. -#. Using ``client``, insert 50 1-megabyte documents in a single ``insertMany`` call. - - - Expect this to fail with a timeout error. - -#. Verify that two ``insert`` commands were executed against ``db.coll`` as part of the ``insertMany`` call. - -2. maxTimeMS is not set for commands sent to mongocryptd -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This test MUST only be run against enterprise server versions 4.2 and higher. - -#. Launch a mongocryptd process on 23000. -#. Create a MongoClient (referred to as ``client``) using the URI ``mongodb://localhost:23000/?timeoutMS=1000``. -#. 
Using ``client``, execute the ``{ ping: 1 }`` command against the ``admin`` database. -#. Verify via command monitoring that the ``ping`` command sent did not contain a ``maxTimeMS`` field. - -3. ClientEncryption -~~~~~~~~~~~~~~~~~~~ - -Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, -``LOCAL_MASTERKEY`` refers to the following base64: - -.. code:: javascript - - Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk - -For each test, perform the following setup: - -#. Using ``internalClient``, drop and create the ``keyvault.datakeys`` collection. -#. Create a MongoClient (referred to as ``keyVaultClient``) with ``timeoutMS=10``. -#. Create a ``ClientEncryption`` object that wraps ``keyVaultClient`` (referred to as ``clientEncryption``). Configure this object with ``keyVaultNamespace`` set to ``keyvault.datakeys`` and the following KMS providers map: - - .. code:: javascript - - { - "local": { "key": } - } - -createDataKey -````````````` - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { - times: 1 - }, - data: { - failCommands: ["insert"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Call ``clientEncryption.createDataKey()`` with the ``local`` KMS provider. - - - Expect this to fail with a timeout error. - -#. Verify that an ``insert`` command was executed against to ``keyvault.datakeys`` as part of the ``createDataKey`` call. - -encrypt -``````` - -#. Call ``client_encryption.createDataKey()`` with the ``local`` KMS provider. - - - Expect a BSON binary with subtype 4 to be returned, referred to as ``datakeyId``. - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { - times: 1 - }, - data: { - failCommands: ["find"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Call ``clientEncryption.encrypt()`` with the value ``hello``, the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the keyId ``datakeyId``. - - - Expect this to fail with a timeout error. - -#. Verify that a ``find`` command was executed against the ``keyvault.datakeys`` collection as part of the ``encrypt`` call. - -decrypt -``````` - -#. Call ``clientEncryption.createDataKey()`` with the ``local`` KMS provider. - - - Expect this to return a BSON binary with subtype 4, referred to as ``dataKeyId``. - -#. Call ``clientEncryption.encrypt()`` with the value ``hello``, the algorithm ``AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic``, and the keyId ``dataKeyId``. - - - Expect this to return a BSON binary with subtype 6, referred to as ``encrypted``. - -#. Close and re-create the ``keyVaultClient`` and ``clientEncryption`` objects. - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { - times: 1 - }, - data: { - failCommands: ["find"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Call ``clientEncryption.decrypt()`` with the value ``encrypted``. - - - Expect this to fail with a timeout error. - -#. Verify that a ``find`` command was executed against the ``keyvault.datakeys`` collection as part of the ``decrypt`` call. - -4. 
Background Connection Pooling -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication -fields (i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait -for some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events -are not published within that time. - -timeoutMS used for handshake commands -````````````````````````````````````` - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { - times: 1 - }, - data: { - failCommands: ["saslContinue"], - blockConnection: true, - blockTimeMS: 15, - appName: "timeoutBackgroundPoolTest" - } - } - -#. Create a MongoClient (referred to as ``client``) configured with the following: - - - ``minPoolSize`` of 1 - - ``timeoutMS`` of 10 - - ``appName`` of ``timeoutBackgroundPoolTest`` - - CMAP monitor configured to listen for ``ConnectionCreatedEvent`` and ``ConnectionClosedEvent`` events. - -#. Wait for a ``ConnectionCreatedEvent`` and a ``ConnectionClosedEvent`` to be published. - -timeoutMS is refreshed for each handshake command -````````````````````````````````````````````````` - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: "alwaysOn", - data: { - failCommands: ["hello", "isMaster", "saslContinue"], - blockConnection: true, - blockTimeMS: 15, - appName: "refreshTimeoutBackgroundPoolTest" - } - } - -#. Create a MongoClient (referred to as ``client``) configured with the following: - - - ``minPoolSize`` of 1 - - ``timeoutMS`` of 20 - - ``appName`` of ``refreshTimeoutBackgroundPoolTest`` - - CMAP monitor configured to listen for ``ConnectionCreatedEvent`` and ``ConnectionReady`` events. - -#. Wait for a ``ConnectionCreatedEvent`` and a ``ConnectionReady`` to be published. - -5. Blocking Iteration Methods -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a -blocking method for cursor iteration that executes ``getMore`` commands in a loop until a document is available or an -error occurs. - -Tailable cursors -```````````````` - -#. Using ``internalClient``, drop the ``db.coll`` collection. -#. Using ``internalClient``, insert the document ``{ x: 1 }`` into ``db.coll``. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: "alwaysOn", - data: { - failCommands: ["getMore"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20``. -#. Using ``client``, create a tailable cursor on ``db.coll`` with ``cursorType=tailable``. - - - Expect this to succeed and return a cursor with a non-zero ID. - -#. Call either a blocking or non-blocking iteration method on the cursor. - - - Expect this to succeed and return the document ``{ x: 1 }`` without sending a ``getMore`` command. - -#. Call the blocking iteration method on the resulting cursor. - - - Expect this to fail with a timeout error. - -#. Verify that a ``find`` command and two ``getMore`` commands were executed against the ``db.coll`` collection during the test. - -Change Streams -`````````````` - -#. Using ``internalClient``, drop the ``db.coll`` collection. -#. 
Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: "alwaysOn", - data: { - failCommands: ["getMore"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20``. -#. Using ``client``, use the ``watch`` helper to create a change stream against ``db.coll``. - - - Expect this to succeed and return a change stream with a non-zero ID. - -#. Call the blocking iteration method on the resulting change stream. - - - Expect this to fail with a timeout error. - -#. Verify that an ``aggregate`` command and two ``getMore`` commands were executed against the ``db.coll`` collection during the test. - -6. GridFS - Upload -~~~~~~~~~~~~~~~~~~ - -Tests in this section MUST only be run against server versions 4.4 and higher. - -uploads via openUploadStream can be timed out -````````````````````````````````````````````` - -#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { times: 1 }, - data: { - failCommands: ["insert"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``. -#. Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database. -#. Call ``bucket.open_upload_stream()`` with the filename ``filename`` to create an upload stream (referred to as ``uploadStream``). - - - Expect this to succeed and return a non-null stream. - -#. Using ``uploadStream``, upload a single ``0x12`` byte. -#. Call ``uploadStream.close()`` to flush the stream and insert chunks. - - - Expect this to fail with a timeout error. - -Aborting an upload stream can be timed out -`````````````````````````````````````````` - -This test only applies to drivers that provide an API to abort a GridFS upload stream. - -#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { times: 1 }, - data: { - failCommands: ["delete"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``. -#. Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database with ``chunkSizeBytes=2``. -#. Call ``bucket.open_upload_stream()`` with the filename ``filename`` to create an upload stream (referred to as ``uploadStream``). - - - Expect this to succeed and return a non-null stream. - -#. Using ``uploadStream``, upload the bytes ``[0x01, 0x02, 0x03, 0x04]``. -#. Call ``uploadStream.abort()``. - - - Expect this to fail with a timeout error. - -7. GridFS - Download -~~~~~~~~~~~~~~~~~~~~ - -This test MUST only be run against server versions 4.4 and higher. - -#. Using ``internalClient``, drop and re-create the ``db.fs.files`` and ``db.fs.chunks`` collections. -#. Using ``internalClient``, insert the following document into the ``db.fs.files`` collection: - - .. 
code:: javascript - - { - "_id": { - "$oid": "000000000000000000000005" - }, - "length": 10, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", - "filename": "length-10", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10``. -#. Using ``client``, create a GridFS bucket (referred to as ``bucket``) that wraps the ``db`` database. -#. Call ``bucket.open_download_stream`` with the id ``{ "$oid": "000000000000000000000005" }`` to create a download stream (referred to as ``downloadStream``). - - - Expect this to succeed and return a non-null stream. - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: "failCommand", - mode: { times: 1 }, - data: { - failCommands: ["find"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Read from the ``downloadStream``. - - - Expect this to fail with a timeout error. - -#. Verify that two ``find`` commands were executed during the read: one against ``db.fs.files`` and another against ``db.fs.chunks``. - -8. Server Selection -~~~~~~~~~~~~~~~~~~~ - -serverSelectionTimeoutMS honored if timeoutMS is not set -```````````````````````````````````````````````````````` - -#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?serverSelectionTimeoutMS=10``. - -#. Using ``client``, execute the command ``{ ping: 1 }`` against the ``admin`` database. - - - Expect this to fail with a server selection timeout error after no more than 15ms. - -timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS -`````````````````````````````````````````````````````````````````````````````````` - -#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20``. - -#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database. - - - Expect this to fail with a server selection timeout error after no more than 15ms. - -serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS -`````````````````````````````````````````````````````````````````````````````````` - -#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10``. - -#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database. - - - Expect this to fail with a server selection timeout error after no more than 15ms. - -serverSelectionTimeoutMS honored for server selection if timeoutMS=0 -```````````````````````````````````````````````````````````````````` - -#. Create a MongoClient (referred to as ``client``) with URI ``mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10``. - -#. Using ``client``, run the command ``{ ping: 1 }`` against the ``admin`` database. - - - Expect this to fail with a server selection timeout error after no more than 15ms. - -timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS -``````````````````````````````````````````````````````````````````````````````````````````````` - -This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a -username and password). - -#. Using ``internalClient``, set the following fail point: - - .. 
code:: javascript - - { - configureFailPoint: failCommand, - mode: { times: 1 }, - data: { - failCommands: ["saslContinue"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=10`` and ``serverSelectionTimeoutMS=20``. -#. Using ``client``, insert the document ``{ x: 1 }`` into collection ``db.coll``. - - - Expect this to fail with a timeout error after no more than 15ms. - -serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS -``````````````````````````````````````````````````````````````````````````````````````````````` - -This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a -username and password). - -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: failCommand, - mode: { times: 1 }, - data: { - failCommands: ["saslContinue"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) with ``timeoutMS=20`` and ``serverSelectionTimeoutMS=10``. -#. Using ``client``, insert the document ``{ x: 1 }`` into collection ``db.coll``. - - - Expect this to fail with a timeout error after no more than 15ms. - -9. endSession -~~~~~~~~~~~~~ - -This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be -run three times: once with the timeout specified via the MongoClient ``timeoutMS`` option, once with the timeout -specified via the ClientSession ``defaultTimeoutMS`` option, and once more with the timeout specified via the -``timeoutMS`` option for the ``endSession`` operation. In all cases, the timeout MUST be set to 10 milliseconds. - -#. Using ``internalClient``, drop the ``db.coll`` collection. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: failCommand, - mode: { times: 1 }, - data: { - failCommands: ["abortTransaction"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) and an explicit ClientSession derived from that MongoClient (referred to as ``session``). -#. Execute the following code: - - .. code:: typescript - - coll = client.database("db").collection("coll") - session.start_transaction() - coll.insert_one({x: 1}, session=session) - -#. Using ``session``, execute ``session.end_session`` - - - Expect this to fail with a timeout error after no more than 15ms. - -10. Convenient Transactions -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. - -timeoutMS is refreshed for abortTransaction if the callback fails -````````````````````````````````````````````````````````````````` - -#. Using ``internalClient``, drop the ``db.coll`` collection. -#. Using ``internalClient``, set the following fail point: - - .. code:: javascript - - { - configureFailPoint: failCommand, - mode: { times: 2 }, - data: { - failCommands: ["insert", "abortTransaction"], - blockConnection: true, - blockTimeMS: 15 - } - } - -#. Create a new MongoClient (referred to as ``client``) configured with ``timeoutMS=10`` and an explicit ClientSession derived from that MongoClient (referred to as ``session``). -#. Using ``session``, execute a ``withTransaction`` operation with the following callback: - - .. 
code:: typescript - - def callback() { - coll = client.database("db").collection("coll") - coll.insert_one({ _id: 1 }, session=session) - } - -#. Expect the previous ``withTransaction`` call to fail with a timeout error. -#. Verify that the following events were published during the ``withTransaction`` call: - - #. ``command_started`` and ``command_failed`` events for an ``insert`` command. - #. ``command_started`` and ``command_failed`` events for an ``abortTransaction`` command. - -Unit Tests -========== - -The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement -these if it is possible to do so using the driver's existing test infrastructure. - -- Operations should ignore ``waitQueueTimeoutMS`` if ``timeoutMS`` is also set. -- If ``timeoutMS`` is set for an operation, the remaining ``timeoutMS`` value should apply to connection checkout after a server has been selected. -- If ``timeoutMS`` is not set for an operation, ``waitQueueTimeoutMS`` should apply to connection checkout after a server has been selected. -- If a new connection is required to execute an operation, ``min(remaining computedServerSelectionTimeout, connectTimeoutMS)`` should apply to socket establishment. -- For drivers that have control over OCSP behavior, ``min(remaining computedServerSelectionTimeout, 5 seconds)`` should apply to HTTP requests against OCSP responders. -- If ``timeoutMS`` is unset, operations fail after two non-consecutive socket timeouts. -- The remaining ``timeoutMS`` value should apply to HTTP requests against KMS servers for CSFLE. -- The remaining ``timeoutMS`` value should apply to commands sent to mongocryptd as part of automatic encryption. -- When doing ``minPoolSize`` maintenance, ``connectTimeoutMS`` is used as the timeout for socket establishment. diff --git a/test/spec/command-logging-and-monitoring/README.rst b/test/spec/command-logging-and-monitoring/README.rst deleted file mode 100644 index e3687be6760..00000000000 --- a/test/spec/command-logging-and-monitoring/README.rst +++ /dev/null @@ -1,60 +0,0 @@ -.. role:: javascript(code) - :language: javascript - -============================== -Command Logging and Monitoring -============================== - -.. contents:: - --------- - -Testing -======= - -Automated Tests -^^^^^^^^^^^^^^^ -There are tests in the `Unified Test Format <../../unified-test-format/unified-test-format.rst>`__ for both logging and -monitoring in `/logging <./logging>`_ and `/monitoring <./monitoring>`_, respectively. Drivers MUST run the logging -tests with their max document length setting (as described in the -`logging specification <../../logging/logging.rst#configurable-max-document-length>`__) set to a large value e.g. 10,000; -this is necessary in order for the driver to emit the full server reply (and to allow matching against that reply) on -certain MongoDB versions and topologies. - -Prose Tests -^^^^^^^^^^^ -Drivers MUST implement the following logging prose tests. These tests require the ability to capture log message data in a -structured form as described in the -`Unified Test Format specification <../../unified-test-format/unified-test-format.rst#expectedLogMessage>`__. - -Note: the following tests mention string "length"; this refers to length in terms of whatever unit the driver has chosen -to support for specifying max document length as discussed in the -`logging specification <../../logging/logging.rst#configurable-max-document-length>`__. - -*Test 1: Default truncation limit* - -1. 
Configure logging with a minimum severity level of "debug" for the "command" component. Do not explicitly configure the max document length. -2. Construct an array ``docs`` containing the document ``{"x" : "y"}`` repeated 100 times. -3. Insert ``docs`` to a collection via ``insertMany``. -4. Inspect the resulting "command started" log message and assert that the "command" value is a string of length 1000 + (length of trailing ellipsis). -5. Inspect the resulting "command succeeded" log message and assert that the "reply" value is a string of length <= 1000 + (length of trailing ellipsis). -6. Run ``find()`` on the collection where the document was inserted. -7. Inspect the resulting "command succeeded" log message and assert that the reply is a string of length 1000 + (length of trailing ellipsis). - -*Test 2: Explicitly configured truncation limit* - -1. Configure logging with a minimum severity level of "debug" for the "command" component. Set the max document length to 5. -2. Run the command ``{"hello": true}``. -3. Inspect the resulting "command started" log message and assert that the "command" value is a string of length 5 + (length of trailing ellipsis). -4. Inspect the resulting "command succeeded" log message and assert that the "reply" value is a string of length 5 + (length of trailing ellipsis). -5. If the driver attaches raw server responses to failures and can access these via log messages to assert on, run the command - ``{"notARealCommand": true}``. Inspect the resulting "command failed" log message and confirm that the server error is - a string of length 5 + (length of trailing ellipsis). - -*Test 3: Truncation with multi-byte codepoints* - -A specific test case is not provided here due to the allowed variations in truncation logic as well as varying extended JSON whitespace usage. -Drivers MUST write language-specific tests that confirm truncation of commands, replies, and (if applicable) server responses included in error -messages work as expected when the data being truncated includes multi-byte Unicode codepoints. -If the driver uses anything other than Unicode codepoints as the unit for max document length, there also MUST be tests confirming that cases -where the max length falls in the middle of a multi-byte codepoint are handled gracefully. diff --git a/test/spec/connection-monitoring-and-pooling/README.rst b/test/spec/connection-monitoring-and-pooling/README.rst deleted file mode 100644 index 4577255427c..00000000000 --- a/test/spec/connection-monitoring-and-pooling/README.rst +++ /dev/null @@ -1,228 +0,0 @@ -.. role:: javascript(code) - :language: javascript - -======================================== -Connection Monitoring and Pooling (CMAP) -======================================== - -.. contents:: - --------- - -Introduction -============ - -The YAML and JSON files in this directory are platform-independent tests that -drivers can use to prove their conformance to the Connection Monitoring and Pooling (CMAP) Spec. - -Several prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Common Test Format -================== - -Each YAML file has the following keys: - -- ``version``: A version number indicating the expected format of the spec tests (current version = 1) -- ``style``: A string indicating what style of tests this file contains. Contains one of the following: - - - ``"unit"``: a test that may be run without connecting to a MongoDB deployment. 
- - ``"integration"``: a test that MUST be run against a real MongoDB deployment. - -- ``description``: A text description of what the test is meant to assert - -Unit Test Format: -================= - -All Unit Tests have some of the following fields: - -- ``poolOptions``: If present, connection pool options to use when creating a pool; - both `standard ConnectionPoolOptions `__ - and the following test-specific options are allowed: - - - ``backgroundThreadIntervalMS``: A time interval between the end of a - `Background Thread Run `__ - and the beginning of the next Run. If a Connection Pool does not implement a Background Thread, the Test Runner MUST ignore the option. - If the option is not specified, an implementation is free to use any value it finds reasonable. - - Possible values (0 is not allowed): - - - A negative value: never begin a Run. - - A positive value: the interval between Runs in milliseconds. - -- ``operations``: A list of operations to perform. All operations support the following fields: - - - ``name``: A string describing which operation to issue. - - ``thread``: The name of the thread in which to run this operation. If not specified, runs in the default thread - -- ``error``: Indicates that the main thread is expected to error during this test. An error may include of the following fields: - - - ``type``: the type of error emitted - - ``message``: the message associated with that error - - ``address``: Address of pool emitting error - -- ``events``: An array of all connection monitoring events expected to occur while running ``operations``. An event may contain any of the following fields - - - ``type``: The type of event emitted - - ``address``: The address of the pool emitting the event - - ``connectionId``: The id of a connection associated with the event - - ``options``: Options used to create the pool - - ``reason``: A reason giving mroe information on why the event was emitted - -- ``ignore``: An array of event names to ignore - -Valid Unit Test Operations are the following: - -- ``start(target)``: Starts a new thread named ``target`` - - - ``target``: The name of the new thread to start - -- ``wait(ms)``: Sleep the current thread for ``ms`` milliseconds - - - ``ms``: The number of milliseconds to sleep the current thread for - -- ``waitForThread(target)``: wait for thread ``target`` to finish executing. Propagate any errors to the main thread. - - - ``target``: The name of the thread to wait for. - -- ``waitForEvent(event, count, timeout)``: block the current thread until ``event`` has occurred ``count`` times - - - ``event``: The name of the event - - ``count``: The number of times the event must occur (counting from the start of the test) - - ``timeout``: If specified, time out with an error after waiting for this many milliseconds without seeing the required events - -- ``label = pool.checkOut()``: call ``checkOut`` on pool, returning the checked out connection - - - ``label``: If specified, associate this label with the returned connection, so that it may be referenced in later operations - -- ``pool.checkIn(connection)``: call ``checkIn`` on pool - - - ``connection``: A string label identifying which connection to check in. 
Should be a label that was previously set with ``checkOut`` - -- ``pool.clear()``: call ``clear`` on Pool -- ``pool.close()``: call ``close`` on Pool -- ``pool.ready()``: call ``ready`` on Pool - - -Integration Test Format -======================= - -The integration test format is identical to the unit test format with -the addition of the following fields to each test: - -- ``runOn`` (optional): An array of server version and/or topology requirements - for which the tests can be run. If the test environment satisfies one or more - of these requirements, the tests may be executed; otherwise, this test should - be skipped. If this field is omitted, the tests can be assumed to have no - particular requirements and should be executed. Each element will have some or - all of the following fields: - - - ``minServerVersion`` (optional): The minimum server version (inclusive) - required to successfully run the tests. If this field is omitted, it should - be assumed that there is no lower bound on the required server version. - - - ``maxServerVersion`` (optional): The maximum server version (inclusive) - against which the tests can be run successfully. If this field is omitted, - it should be assumed that there is no upper bound on the required server - version. - -- ``failPoint``: optional, a document containing a ``configureFailPoint`` - command to run against the endpoint being used for the test. - -- ``poolOptions.appName`` (optional): appName attribute to be set in connections, which will be affected by the fail point. - -Spec Test Match Function -======================== - -The definition of MATCH or MATCHES in the Spec Test Runner is as follows: - -- MATCH takes two values, ``expected`` and ``actual`` -- Notation is "Assert [actual] MATCHES [expected] -- Assertion passes if ``expected`` is a subset of ``actual``, with the values ``42`` and ``"42"`` acting as placeholders for "any value" - -Pseudocode implementation of ``actual`` MATCHES ``expected``: - -:: - - If expected is "42" or 42: - Assert that actual exists (is not null or undefined) - Else: - Assert that actual is of the same JSON type as expected - If expected is a JSON array: - For every idx/value in expected: - Assert that actual[idx] MATCHES value - Else if expected is a JSON object: - For every key/value in expected - Assert that actual[key] MATCHES value - Else: - Assert that expected equals actual - -Unit Test Runner: -================= - -For the unit tests, the behavior of a Connection is irrelevant beyond the need to asserting ``connection.id``. Drivers MAY use a mock connection class for testing the pool behavior in unit tests - -For each YAML file with ``style: unit``: - -- Create a Pool ``pool``, subscribe and capture any Connection Monitoring events emitted in order. - - - If ``poolOptions`` is specified, use those options to initialize both pools - - The returned pool must have an ``address`` set as a string value. - -- Process each ``operation`` in ``operations`` (on the main thread) - - - If a ``thread`` is specified, the main thread MUST schedule the operation to execute in the corresponding thread. Otherwise, execute the operation directly in the main thread. 
- -- If ``error`` is presented - - - Assert that an actual error ``actualError`` was thrown by the main thread - - Assert that ``actualError`` MATCHES ``error`` - -- Else: - - - Assert that no errors were thrown by the main thread - -- calculate ``actualEvents`` as every Connection Event emitted whose ``type`` is not in ``ignore`` -- if ``events`` is not empty, then for every ``idx``/``expectedEvent`` in ``events`` - - - Assert that ``actualEvents[idx]`` exists - - Assert that ``actualEvents[idx]`` MATCHES ``expectedEvent`` - - -It is important to note that the ``ignore`` list is used for calculating ``actualEvents``, but is NOT used for the ``waitForEvent`` command - -Integration Test Runner -======================= - -The steps to run the integration tests are the same as those used to run the -unit tests with the following modifications: - -- The integration tests MUST be run against an actual endpoint. If the - deployment being tested contains multiple endpoints, then the runner MUST - only use one of them to run the tests against. - -- For each test, if `failPoint` is specified, its value is a - ``configureFailPoint`` command. Run the command on the admin database of the - endpoint being tested to enable the fail point. - -- At the end of each test, any enabled fail point MUST be disabled to avoid - spurious failures in subsequent tests. The fail point may be disabled like - so:: - - db.adminCommand({ - configureFailPoint: , - mode: "off" - }); - - -Prose Tests -=========== - -The following tests have not yet been automated, but MUST still be tested - -#. All ConnectionPoolOptions MUST be specified at the MongoClient level -#. All ConnectionPoolOptions MUST be the same for all pools created by a MongoClient -#. A user MUST be able to specify all ConnectionPoolOptions via a URI string -#. A user MUST be able to subscribe to Connection Monitoring Events in a manner idiomatic to their language and driver -#. When a check out attempt fails because connection set up throws an error, - assert that a ConnectionCheckOutFailedEvent with reason="connectionError" is emitted. diff --git a/test/spec/index-management/README.rst b/test/spec/index-management/README.rst deleted file mode 100644 index 2840d988363..00000000000 --- a/test/spec/index-management/README.rst +++ /dev/null @@ -1,223 +0,0 @@ -====================== -Index Management Tests -====================== - -.. contents:: - ----- - -Test Plan -========= - -These prose tests are ported from the legacy enumerate-indexes spec. 
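-
-For orientation, the enumeration steps described below map onto the Node.js driver
-roughly as in the following sketch. It is illustrative only and not part of the
-ported prose tests; the database, collection, and index names are arbitrary.
-
-.. code:: typescript
-
-   import { MongoClient } from 'mongodb';
-
-   // Illustrative only: create the indexes the prose tests call for, then
-   // enumerate them so the assertions described below can be made.
-   async function enumerateIndexNames(uri: string): Promise<string[]> {
-     const client = new MongoClient(uri);
-     try {
-       const coll = client.db('index_mgmt_tests').collection('docs');
-       await coll.insertOne({ a: 1, b: 2, c: 3 });
-       await coll.createIndex({ a: 1 });                   // single column index
-       await coll.createIndex({ a: 1, b: -1 });            // compound index
-       await coll.createIndex({ c: 1 }, { unique: true }); // unique index
-       const indexes = await coll.listIndexes().toArray();
-       return indexes.map(index => index.name);
-     } finally {
-       await client.close();
-     }
-   }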
- -Configurations --------------- - -- standalone node -- replica set primary node -- replica set secondary node -- mongos node - -Preparation ------------ - -For each of the configurations: - -- Create a (new) database -- Create a collection -- Create a single column index, a compound index, and a unique index -- Insert at least one document containing all the fields that the above - indicated indexes act on - -Tests - -- Run the driver's method that returns a list of index names, and: - - - verify that *all* index names are represented in the result - - verify that there are no duplicate index names - - verify there are no returned indexes that do not exist - -- Run the driver's method that returns a list of index information records, and: - - - verify all the indexes are represented in the result - - verify the "unique" flags show up for the unique index - - verify there are no duplicates in the returned list - - if the result consists of statically defined index models that include an ``ns`` field, verify - that its value is accurate - -Search Index Management Helpers -------------------------------- - -These tests are intended to smoke test the search management helpers end-to-end against a live Atlas cluster. - -The search index management commands are asynchronous and mongod/mongos returns before the changes to a clusters' search indexes have completed. When -these prose tests specify "waiting for the changes", drivers should repeatedly poll the cluster with ``listSearchIndexes`` -until the changes are visible. Each test specifies the condition that is considered "ready". For example, when creating a -new search index, waiting until the inserted index has a status ``queryable: true`` indicates that the index was successfully -created. - -The commands tested in these prose tests take a while to successfully complete. Drivers should raise the timeout for each test to avoid timeout errors if -the test timeout is too low. 5 minutes is a sufficiently large timeout that any timeout that occurs indicates a real failure, but this value is not required and can be tweaked per-driver. - -There is a server-side limitation that prevents multiple search indexes from being created with the same name, definition and -collection name. This limitation does not take into account collection uuid. Because these commands are asynchronous, any cleanup -code that may run after a test (cleaning a database or dropping search indexes) may not have completed by the next iteration of the -test (or the next test run, if running locally). To address this issue, each test uses a randomly generated collection name. Drivers -may generate this collection name however they like, but a suggested implementation is a hex representation of an -ObjectId (``new ObjectId().toHexString()`` in Node). - -Setup -~~~~~ - -These tests must run against an Atlas cluster with a 7.0+ server. `Scripts are available `_ in drivers-evergreen-tools which can setup and teardown -Atlas clusters. To ensure that the Atlas cluster is cleaned up after each CI run, drivers should configure evergreen to run these tests -as a part of a task group. Be sure that the cluster gets torn down! - -When working locally on these tests, the same Atlas setup and teardown scripts can be used locally to provision a cluster for development. - -Case 1: Driver can successfully create and list search indexes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. 
Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). -#. Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. Use the following definition: - - .. code:: typescript - - { - name: 'test-search-index', - definition: { - mappings: { dynamic: false } - } - } - -#. Assert that the command returns the name of the index: ``"test-search-index"``. -#. Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``: - - - An index with the ``name`` of ``test-search-index`` is present and the index has a field ``queryable`` with a value of ``true``. - -#. Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }`` - -Case 2: Driver can successfully create multiple indexes in batch -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). -#. Create two new search indexes on ``coll0`` with the ``createSearchIndexes`` helper. Use the following - definitions when creating the indexes. These definitions are referred to as ``indexDefinitions``. - - .. code:: typescript - - { - name: 'test-search-index-1', - definition: { - mappings: { dynamic: false } - } - } - - { - name: 'test-search-index-2', - definition: { - mappings: { dynamic: false } - } - } - -#. Assert that the command returns an array containing the new indexes' names: ``["test-search-index-1", "test-search-index-2"]``. -#. Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following conditions are satisfied. - - - An index with the ``name`` of ``test-search-index-1`` is present and index has a field ``queryable`` with the value of ``true``. Store result in ``index1``. - - An index with the ``name`` of ``test-search-index-2`` is present and index has a field ``queryable`` with the value of ``true``. Store result in ``index2``. - -#. Assert that ``index1`` and ``index2`` have the property ``latestDefinition`` whose value is ``{ "mappings" : { "dynamic" : false } }`` - -Case 3: Driver can successfully drop search indexes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). -#. Create a new search index on ``coll0`` with the following definition: - - .. code:: typescript - - { - name: 'test-search-index', - definition: { - mappings: { dynamic: false } - } - } - -#. Assert that the command returns the name of the index: ``"test-search-index"``. -#. Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied: - - - An index with the ``name`` of ``test-search-index`` is present and index has a field ``queryable`` with the value of ``true``. - -#. Run a ``dropSearchIndex`` on ``coll0``, using ``test-search-index`` for the name. -#. Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until ``listSearchIndexes`` returns an empty array. - -This test fails if it times out waiting for the deletion to succeed. - -Case 4: Driver can update a search index -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). -#. Create a new search index on ``coll0`` with the following definition: - - .. 
code:: typescript - - { - name: 'test-search-index', - definition: { - mappings: { dynamic: false } - } - } - -#. Assert that the command returns the name of the index: ``"test-search-index"``. -#. Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied: - - - An index with the ``name`` of ``test-search-index`` is present and index has a field ``queryable`` with the value of ``true``. - -#. Run a ``updateSearchIndex`` on ``coll0``, using the following definition. - - .. code:: typescript - - { - name: 'test-search-index', - definition: { - mappings: { dynamic: true } - } - } - -#. Assert that the command does not error and the server responds with a success. -#. Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following conditions are satisfied: - - - An index with the ``name`` of ``test-search-index`` is present. This index is referred to as ``index``. - - The index has a field ``queryable`` with a value of ``true`` and has a field ``status`` with the value of ``READY``. - -#. Assert that an index is present with the name ``test-search-index`` and the definition has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': true } }``. - -Case 5: ``dropSearchIndex`` suppresses namespace not found errors -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Create a driver-side collection object for a randomly generated collection name. Do not create this collection on the server. -#. Run a ``dropSearchIndex`` command and assert that no error is thrown. - -Case 6: Driver can successfully create and list search indexes with non-default readConcern and writeConcern -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). -#. Apply a write concern ``WriteConcern(w=1)`` and a read concern with ``ReadConcern(level="majority")`` to ``coll0``. -#. Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. Use the following definition: - - .. code:: typescript - - { - name: 'test-search-index-case6', - definition: { - mappings: { dynamic: false } - } - } - -#. Assert that the command returns the name of the index: ``"test-search-index-case6"``. -#. Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``: - - - An index with the ``name`` of ``test-search-index-case6`` is present and the index has a field ``queryable`` with a value of ``true``. - -#. Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }`` diff --git a/test/spec/initial-dns-seedlist-discovery/README.rst b/test/spec/initial-dns-seedlist-discovery/README.rst deleted file mode 100644 index 0e6404aa5b5..00000000000 --- a/test/spec/initial-dns-seedlist-discovery/README.rst +++ /dev/null @@ -1,135 +0,0 @@ -==================================== -Initial DNS Seedlist Discovery tests -==================================== - -This directory contains platform-independent tests that drivers can use -to prove their conformance to the Initial DNS Seedlist Discovery spec. - -Test Setup ----------- - -The tests in the ``replica-set`` directory MUST be executed against a -three-node replica set on localhost ports 27017, 27018, and 27019 with -replica set name ``repl0``. 
- -The tests in the ``load-balanced`` directory MUST be executed against a -load-balanced sharded cluster with the mongos servers running on localhost ports -27017 and 27018 (corresponding to the script in `drivers-evergreen-tools`_). The -load balancers, shard servers, and config servers may run on any open ports. - -.. _`drivers-evergreen-tools`: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/run-load-balancer.sh - -The tests in the ``sharded`` directory MUST be executed against a sharded -cluster with the mongos servers running on localhost ports 27017 and 27018. -Shard servers and config servers may run on any open ports. - -In all cases, the clusters MUST be started with SSL enabled. - -To run the tests that accompany this spec, you need to configure the SRV and -TXT records with a real name server. The following records are required for -these tests:: - - Record TTL Class Address - localhost.test.build.10gen.cc. 86400 IN A 127.0.0.1 - localhost.sub.test.build.10gen.cc. 86400 IN A 127.0.0.1 - - Record TTL Class Port Target - _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27018 localhost.test.build.10gen.cc. - _mongodb._tcp.test2.test.build.10gen.cc. 86400 IN SRV 27018 localhost.test.build.10gen.cc. - _mongodb._tcp.test2.test.build.10gen.cc. 86400 IN SRV 27019 localhost.test.build.10gen.cc. - _mongodb._tcp.test3.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test5.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test6.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test7.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test8.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test10.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test11.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test12.test.build.10gen.cc. 86400 IN SRV 27017 localhost.build.10gen.cc. - _mongodb._tcp.test13.test.build.10gen.cc. 86400 IN SRV 27017 test.build.10gen.cc. - _mongodb._tcp.test14.test.build.10gen.cc. 86400 IN SRV 27017 localhost.not-test.build.10gen.cc. - _mongodb._tcp.test15.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.not-build.10gen.cc. - _mongodb._tcp.test16.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.not-10gen.cc. - _mongodb._tcp.test17.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.not-cc. - _mongodb._tcp.test18.test.build.10gen.cc. 86400 IN SRV 27017 localhost.sub.test.build.10gen.cc. - _mongodb._tcp.test19.test.build.10gen.cc. 86400 IN SRV 27017 localhost.evil.build.10gen.cc. - _mongodb._tcp.test19.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test20.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test21.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _customname._tcp.test22.test.build.10gen.cc 86400 IN SRV 27017 localhost.test.build.10gen.cc - - Record TTL Class Text - test5.test.build.10gen.cc. 86400 IN TXT "replicaSet=repl0&authSource=thisDB" - test6.test.build.10gen.cc. 86400 IN TXT "replicaSet=repl0" - test6.test.build.10gen.cc. 86400 IN TXT "authSource=otherDB" - test7.test.build.10gen.cc. 86400 IN TXT "ssl=false" - test8.test.build.10gen.cc. 
86400 IN TXT "authSource" - test10.test.build.10gen.cc. 86400 IN TXT "socketTimeoutMS=500" - test11.test.build.10gen.cc. 86400 IN TXT "replicaS" "et=rep" "l0" - test20.test.build.10gen.cc. 86400 IN TXT "loadBalanced=true" - test21.test.build.10gen.cc. 86400 IN TXT "loadBalanced=false" - -Note that ``test4`` is omitted deliberately to test what happens with no SRV -record. ``test9`` is missing because it was deleted during the development of -the tests. The missing ``test.`` sub-domain in the SRV record target for -``test12`` is deliberate. ``test22`` is used to test a custom service name -(``customname``). - -In our tests we have used ``localhost.test.build.10gen.cc`` as the domain, and -then configured ``localhost.test.build.10gen.cc`` to resolve to 127.0.0.1. - -You need to adapt the records shown above to replace ``test.build.10gen.cc`` -with your own domain name, and update the "uri" field in the YAML or JSON files -in this directory with the actual domain. - -Test Format and Use -------------------- - -These YAML and JSON files contain the following fields: - -- ``uri``: a ``mongodb+srv`` connection string -- ``seeds``: the expected set of initial seeds discovered from the SRV record -- ``numSeeds``: the expected number of initial seeds discovered from the SRV - record. This is mainly used to test ``srvMaxHosts``, since randomly selected - hosts cannot be deterministically asserted. -- ``hosts``: the discovered topology's list of hosts once SDAM completes a scan -- ``numHosts``: the expected number of hosts discovered once SDAM completes a - scan. This is mainly used to test ``srvMaxHosts``, since randomly selected - hosts cannot be deterministically asserted. -- ``options``: the parsed `URI options`_ as discovered from the - `Connection String`_'s "Connection Options" component and SRV resolution - (e.g. TXT records, implicit ``tls`` default). -- ``parsed_options``: additional, parsed options from other `Connection String`_ - components. This is mainly used for asserting ``UserInfo`` (as ``user`` and - ``password``) and ``Auth database`` (as ``auth_database``). -- ``error``: indicates that the parsing of the URI, or the resolving or - contents of the SRV or TXT records included errors. -- ``comment``: a comment to indicate why a test would fail. - -.. _`Connection String`: ../../connection-string/connection-string-spec.rst -.. _`URI options`: ../../uri-options/uri-options.rst - -For each file, create a MongoClient initialized with the ``mongodb+srv`` -connection string. - -If ``seeds`` is specified, drivers SHOULD verify that the set of hosts in the -client's initial seedlist matches the list in ``seeds``. If ``numSeeds`` is -specified, drivers SHOULD verify that the size of that set matches ``numSeeds``. - -If ``hosts`` is specified, drivers MUST verify that the set of -ServerDescriptions in the client's TopologyDescription eventually matches the -list in ``hosts``. If ``numHosts`` is specified, drivers MUST verify that the -size of that set matches ``numHosts``. - -If ``options`` is specified, drivers MUST verify each of the values under -``options`` match the MongoClient's parsed value for that option. There may be -other options parsed by the MongoClient as well, which a test does not verify. - -If ``parsed_options`` is specified, drivers MUST verify that each of the values -under ``parsed_options`` match the MongoClient's parsed value for that option. 
-Supported values include, but are not limited to, ``user`` and ``password`` -(parsed from ``UserInfo``) and ``auth_database`` (parsed from -``Auth database``). - -If ``error`` is specified and ``true``, drivers MUST verify that an error has -been thrown. diff --git a/test/spec/load-balancers/README.rst b/test/spec/load-balancers/README.rst deleted file mode 100644 index 3975e7b0b7f..00000000000 --- a/test/spec/load-balancers/README.rst +++ /dev/null @@ -1,68 +0,0 @@ -=========================== -Load Balancer Support Tests -=========================== - -.. contents:: - ----- - -Introduction -============ - -This document describes how drivers should create load balanced clusters for -testing and how tests should be executed for such clusters. - -Testing Requirements -==================== - -For each server version that supports load balanced clusters, drivers MUST -add two Evergreen tasks: one with a sharded cluster with both authentication -and TLS enabled and one with a sharded cluster with authentication and TLS -disabled. In each task, the sharded cluster MUST be configured with two -mongos nodes running on localhost ports 27017 and 27018. The shard and config -servers may run on any free ports. Each task MUST also start up two TCP load -balancers operating in round-robin mode: one fronting both mongos servers and -one fronting a single mongos. - -Load Balancer Configuration ---------------------------- - -Drivers MUST use the ``run-load-balancer.sh`` script in -``drivers-evergreen-tools`` to start the TCP load balancers for Evergreen -tasks. This script MUST be run after the backing sharded cluster has already -been started. The script writes the URIs of the load balancers to a YAML -expansions file, which can be read by drivers via the ``expansions.update`` -Evergreen command. This will store the URIs into the ``SINGLE_MONGOS_LB_URI`` -and ``MULTI_MONGOS_LB_URI`` environment variables. - -Test Runner Configuration -------------------------- - -If the backing sharded cluster is configured with TLS enabled, drivers MUST -add the relevant TLS options to both ``SINGLE_MONGOS_LB_URI`` and -``MULTI_MONGOS_LB_URI`` to ensure that test clients can connect to the -cluster. Drivers MUST use the final URI stored in ``SINGLE_MONGOS_LB_URI`` -(with additional TLS options if required) to configure internal clients for -test runners (e.g. the internal MongoClient described by the `Unified Test -Format spec <../../unified-test-format/unified-test-format.rst>`__). - -In addition to modifying load balancer URIs, drivers MUST also mock server -support for returning a ``serviceId`` field in ``hello`` or legacy ``hello`` -command responses when running tests against a load-balanced cluster. This -can be done by using the value of ``topologyVersion.processId`` to set -``serviceId``. This MUST be done for all connections established by the test -runner, including those made by any internal clients. - -Tests -====== - -The YAML and JSON files in this directory contain platform-independent tests -written in the `Unified Test Format -<../../unified-test-format/unified-test-format.rst>`_. Drivers MUST run the -following test suites against a load balanced cluster: - -#. All test suites written in the Unified Test Format -#. Retryable Reads -#. Retryable Writes -#. Change Streams -#. 
Initial DNS Seedlist Discovery diff --git a/test/spec/max-staleness/README.rst b/test/spec/max-staleness/README.rst deleted file mode 100644 index 9cf945c85e6..00000000000 --- a/test/spec/max-staleness/README.rst +++ /dev/null @@ -1,72 +0,0 @@ -=================== -Max Staleness Tests -=================== - -This directory contains platform-independent tests that drivers can use -to prove their conformance to the Max Staleness Spec. The tests -are provided in both YAML and JSON formats, and drivers may test against -whichever format is more convenient for them. - -Test Format and Use -------------------- - -YAML files contain the following setup for each test: - -- ``heartbeatFrequencyMS``: optional int - -- ``topology_description``: the state of a mocked cluster - - - ``type``: the TopologyType - - - ``servers``: a list of ServerDescriptions, each with: - - - ``address``: a "host:port" - - - ``type``: a ServerType - - - ``avg_rtt_ms``: average round trip time in milliseconds [1]_ - - - ``lastWrite``: subdocument - - - ``lastWriteDate``: nonzero int64, milliseconds since some past time - - - ``maxWireVersion``: an int - - - ``lastUpdateTime``: milliseconds since the Unix epoch - -- ``read_preference``: a read preference document - -For each test, create a MongoClient. -Configure it with the heartbeatFrequencyMS specified by the test, -or accept the driver's default heartbeatFrequencyMS if the test omits this field. - -(Single-threaded and multi-threaded clients now make heartbeatFrequencyMS configurable. -This is a change in Server Discovery and Monitoring to support maxStalenessSeconds. -Before, multi-threaded clients were allowed to make it configurable or not.) - -For each test, create a new TopologyDescription object initialized with the -values from ``topology_description``. Initialize ServerDescriptions from the -provided data. Create a ReadPreference object initialized with the values -from ``read_preference``. Select servers that match the ReadPreference. - -Each test specifies that it expects an error, or specifies two sets of servers: - -- ``error: true`` -- ``suitable_servers``: the set of servers in the TopologyDescription - that are suitable for the ReadPreference, without taking ``avg_rtt_ms`` - into account. -- ``in_latency_window``: the set of suitable servers whose round trip time - qualifies them according to the default latency threshold of 15ms. - In each test there is one server in the latency window, to ensure - tests pass or fail deterministically. - -If the file contains ``error: true``, drivers MUST test that they throw an -error during server selection due to an invalid read preference. For other -files, drivers MUST test that they correctly select the set of servers in -``in_latency_window``. - -Drivers MAY also test that before filtration by latency, they select the -specified set of "suitable" servers. - -.. [1] ``avg_rtt_ms`` is included merely for consistency with - Server Selection tests. It is not significant in Max Staleness tests. diff --git a/test/spec/read-write-concern/README.rst b/test/spec/read-write-concern/README.rst deleted file mode 100644 index 4c4dd984f29..00000000000 --- a/test/spec/read-write-concern/README.rst +++ /dev/null @@ -1,68 +0,0 @@ -============================ -Read and Write Concern Tests -============================ - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the Read and Write Concern -specification. 
- -Version -------- - -Files in the "specifications" repository have no version scheme. They are not -tied to a MongoDB server version. - -Format ------- - -Connection String -~~~~~~~~~~~~~~~~~ - -These tests are designed to exercise the connection string parsing related -to read concern and write concern. - -Each YAML file contains an object with a single ``tests`` key. This key is an -array of test case objects, each of which have the following keys: - -- ``description``: A string describing the test. -- ``uri``: A string containing the URI to be parsed. -- ``valid:``: a boolean indicating if parsing the uri should result in an error. -- ``writeConcern:`` A document indicating the expected write concern. -- ``readConcern:`` A document indicating the expected read concern. - -If a test case includes a null value for one of these keys, or if the key is missing, -no assertion is necessary. This both simplifies parsing of the test files and allows flexibility -for drivers that might substitute default values *during* parsing. - -Document -~~~~~~~~ - -These tests are designed to ensure compliance with the spec in relation to what should be -sent to the server. - -Each YAML file contains an object with a single ``tests`` key. This key is an -array of test case objects, each of which have the following keys: - -- ``description``: A string describing the test. -- ``valid:``: a boolean indicating if the write concern created from the document is valid. -- ``writeConcern:`` A document indicating the write concern to use. -- ``writeConcernDocument:`` A document indicating the write concern to be sent to the server. -- ``readConcern:`` A document indicating the read concern to use. -- ``readConcernDocument:`` A document indicating the read concern to be sent to the server. -- ``isServerDefault:`` Indicates whether the read or write concern is considered the server's default. -- ``isAcknowledged:`` Indicates if the write concern should be considered acknowledged. - -Operation -~~~~~~~~~ - -These tests check that the default write concern is omitted in operations. - -The tests utilize the `Unified Test Format <../../unified-test-format/unified-test-format.md>`__. - -Use as unit tests -================= - -Testing whether a URI is valid or not should simply be a matter of checking -whether URI parsing raises an error or exception. -Testing for emitted warnings may require more legwork (e.g. configuring a log -handler and watching for output). diff --git a/test/spec/retryable-reads/README.rst b/test/spec/retryable-reads/README.rst deleted file mode 100644 index 06c9bb78863..00000000000 --- a/test/spec/retryable-reads/README.rst +++ /dev/null @@ -1,249 +0,0 @@ -===================== -Retryable Reads Tests -===================== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in the ``legacy`` and ``unified`` sub-directories are platform-independent tests -that drivers can use to prove their conformance to the Retryable Reads spec. Tests in the -``unified`` directory are written using the `Unified Test Format <../../unified-test-format/unified-test-format.rst>`_. -Tests in the ``legacy`` directory are written using the format described below. - -Prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Tests will require a MongoClient created with options defined in the tests. -Integration tests will require a running MongoDB cluster with server versions -4.0 or later. - -N.B. 
The spec specifies 3.6 as the minimum server version: however, -``failCommand`` is not supported on 3.6, so for now, testing requires MongoDB -4.0. Once `DRIVERS-560`_ is resolved, we will attempt to adapt its live failure -integration tests to test Retryable Reads on MongoDB 3.6. - -.. _DRIVERS-560: https://jira.mongodb.org/browse/DRIVERS-560 - -Server Fail Point -================= - -See: `Server Fail Point`_ in the Transactions spec test suite. - -.. _Server Fail Point: ../../transactions/tests#server-fail-point - -Disabling Fail Point after Test Execution ------------------------------------------ - -After each test that configures a fail point, drivers should disable the -``failCommand`` fail point to avoid spurious failures in -subsequent tests. The fail point may be disabled like so:: - - db.runCommand({ - configureFailPoint: "failCommand", - mode: "off" - }); - -Network Error Tests -=================== - -Network error tests are expressed in YAML and should be run against a standalone, -shard cluster, or single-node replica set. - - -Test Format ------------ - -Each YAML file has the following keys: - -- ``runOn`` (optional): An array of server version and/or topology requirements - for which the tests can be run. If the test environment satisfies one or more - of these requirements, the tests may be executed; otherwise, this file should - be skipped. If this field is omitted, the tests can be assumed to have no - particular requirements and should be executed. Each element will have some or - all of the following fields: - - - ``minServerVersion`` (optional): The minimum server version (inclusive) - required to successfully run the tests. If this field is omitted, it should - be assumed that there is no lower bound on the required server version. - - - ``maxServerVersion`` (optional): The maximum server version (inclusive) - against which the tests can be run successfully. If this field is omitted, - it should be assumed that there is no upper bound on the required server - version. - - - ``topology`` (optional): An array of server topologies against which the - tests can be run successfully. Valid topologies are "single", - "replicaset", "sharded", and "load-balanced". If this field is omitted, - the default is all topologies (i.e. ``["single", "replicaset", "sharded", - "load-balanced"]``). - - - ``serverless``: Optional string. Whether or not the test should be run on - serverless instances imitating sharded clusters. Valid values are "require", - "forbid", and "allow". If "require", the test MUST only be run on serverless - instances. If "forbid", the test MUST NOT be run on serverless instances. If - omitted or "allow", this option has no effect. - - The test runner MUST be informed whether or not serverless is being used in - order to determine if this requirement is met (e.g. through an environment - variable or configuration option). Since the serverless proxy imitates a - mongos, the runner is not capable of determining this by issuing a server - command such as ``buildInfo`` or ``hello``. - -- ``database_name`` and ``collection_name``: Optional. The database and - collection to use for testing. - -- ``bucket_name``: Optional. The GridFS bucket name to use for testing. - -- ``data``: The data that should exist in the collection(s) under test before - each test run. This will typically be an array of documents to be inserted - into the collection under test (i.e. 
``collection_name``); however, this field - may also be an object mapping collection names to arrays of documents to be - inserted into the specified collection. - -- ``tests``: An array of tests that are to be run independently of each other. - Each test will have some or all of the following fields: - - - ``description``: The name of the test. - - - ``clientOptions``: Optional, parameters to pass to MongoClient(). - - - ``useMultipleMongoses`` (optional): If ``true``, and the topology type is - ``Sharded``, the MongoClient for this test should be initialized with multiple - mongos seed addresses. If ``false`` or omitted, only a single mongos address - should be specified. - - If ``true``, and the topology type is ``LoadBalanced``, the MongoClient for - this test should be initialized with the URI of the load balancer fronting - multiple servers. If ``false`` or omitted, the MongoClient for this test - should be initialized with the URI of the load balancer fronting a single - server. - - ``useMultipleMongoses`` only affects ``Sharded`` and ``LoadBalanced`` topologies. - - - ``skipReason``: Optional, string describing why this test should be skipped. - - - ``failPoint``: Optional, a server fail point to enable, expressed as the - configureFailPoint command to run on the admin database. - - - ``operations``: An array of documents describing an operation to be - executed. Each document has the following fields: - - - ``name``: The name of the operation on ``object``. - - - ``object``: The name of the object to perform the operation on. Can be - "database", "collection", "client", or "gridfsbucket." - - - ``arguments``: Optional, the names and values of arguments. - - - ``result``: Optional. The return value from the operation, if any. This - field may be a scalar (e.g. in the case of a count), a single document, or - an array of documents in the case of a multi-document read. - - - ``error``: Optional. If ``true``, the test should expect an error or - exception. - - - ``expectations``: Optional list of command-started events. - -GridFS Tests ------------- - -GridFS tests are denoted by when the YAML file contains ``bucket_name``. -The ``data`` field will also be an object, which maps collection names -(e.g. ``fs.files``) to an array of documents that should be inserted into -the specified collection. - -``fs.files`` and ``fs.chunks`` should be created in the database -specified by ``database_name``. This could be done via inserts or by -creating GridFSBuckets—using the GridFS ``bucketName`` (see -`GridFSBucket spec`_) specified by ``bucket_name`` field in the YAML -file—and calling ``upload_from_stream_with_id`` with the appropriate -data. - -``Download`` tests should be tested against ``GridFS.download_to_stream``. -``DownloadByName`` tests should be tested against -``GridFS.download_to_stream_by_name``. - - -.. _GridFSBucket spec: https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst#configurable-gridfsbucket-class - - -Speeding Up Tests ------------------ - -Drivers can greatly reduce the execution time of tests by setting `heartbeatFrequencyMS`_ -and `minHeartbeatFrequencyMS`_ (internally) to a small value (e.g. 5ms), below what -is normally permitted in the SDAM spec. If a test specifies an explicit value for -heartbeatFrequencyMS (e.g. client or URI options), drivers MUST use that value. - -.. _minHeartbeatFrequencyMS: ../../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#minheartbeatfrequencyms -.. 
_heartbeatFrequencyMS: ../../server-discovery-and-monitoring/server-discovery-and-monitoring.rst#heartbeatfrequencyms - -Optional Enumeration Commands -============================= - -A driver only needs to test the optional enumeration commands it has chosen to -implement (e.g. ``Database.listCollectionNames()``). - -PoolClearedError Retryability Test -================================== - -This test will be used to ensure drivers properly retry after encountering PoolClearedErrors. -It MUST be implemented by any driver that implements the CMAP specification. -This test requires MongoDB 4.2.9+ for ``blockConnection`` support in the failpoint. - -1. Create a client with maxPoolSize=1 and retryReads=true. If testing against a - sharded deployment, be sure to connect to only a single mongos. - -2. Enable the following failpoint:: - - { - configureFailPoint: "failCommand", - mode: { times: 1 }, - data: { - failCommands: ["find"], - errorCode: 91, - blockConnection: true, - blockTimeMS: 1000 - } - } - -3. Start two threads and attempt to perform a ``findOne`` simultaneously on both. - -4. Verify that both ``findOne`` attempts succeed. - -5. Via CMAP monitoring, assert that the first check out succeeds. - -6. Via CMAP monitoring, assert that a PoolClearedEvent is then emitted. - -7. Via CMAP monitoring, assert that the second check out then fails due to a - connection error. - -8. Via Command Monitoring, assert that exactly three ``find`` CommandStartedEvents - were observed in total. - -9. Disable the failpoint. - - -Changelog -========= - -:2022-01-10: Create legacy and unified subdirectories for new unified tests - -:2021-08-27: Clarify behavior of ``useMultipleMongoses`` for ``LoadBalanced`` topologies. - -:2019-03-19: Add top-level ``runOn`` field to denote server version and/or - topology requirements requirements for the test file. Removes the - ``minServerVersion`` and ``topology`` top-level fields, which are - now expressed within ``runOn`` elements. - - Add test-level ``useMultipleMongoses`` field. - -:2020-09-16: Suggest lowering heartbeatFrequencyMS in addition to minHeartbeatFrequencyMS. - -:2021-03-23: Add prose test for retrying PoolClearedErrors - -:2021-04-29: Add ``load-balanced`` to test topology requirements. diff --git a/test/spec/server-discovery-and-monitoring/README.rst b/test/spec/server-discovery-and-monitoring/README.rst deleted file mode 100644 index caadb9b4b69..00000000000 --- a/test/spec/server-discovery-and-monitoring/README.rst +++ /dev/null @@ -1,265 +0,0 @@ -===================================== -Server Discovery And Monitoring Tests -===================================== - -.. contents:: - ----- - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the -Server Discovery And Monitoring Spec. - -Additional prose tests, that cannot be represented as spec tests, are -described and MUST be implemented. - -Version -------- - -Files in the "specifications" repository have no version scheme. They are not -tied to a MongoDB server version. - -Format ------- - -Each YAML file has the following keys: - -- description: A textual description of the test. -- uri: A connection string. -- phases: An array of "phase" objects. - A phase of the test optionally sends inputs to the client, - then tests the client's resulting TopologyDescription. - -Each phase object has the following keys: - -- description: (optional) A textual description of this phase. 
-- responses: (optional) An array of "response" objects. If not provided, - the test runner should construct the client and perform assertions specified - in the outcome object without processing any responses. -- applicationErrors: (optional) An array of "applicationError" objects. -- outcome: An "outcome" object representing the TopologyDescription. - -A response is a pair of values: - -- The source, for example "a:27017". - This is the address the client sent the "hello" or legacy hello command to. -- A hello or legacy hello response, for example ``{ok: 1, helloOk: true, isWritablePrimary: true}``. - If the response includes an electionId it is shown in extended JSON like - ``{"$oid": "000000000000000000000002"}``. - The empty response `{}` indicates a network error - when attempting to call "hello" or legacy hello. - -An "applicationError" object has the following keys: - -- address: The source address, for example "a:27017". -- generation: (optional) The error's generation number, for example ``1``. - When absent this value defaults to the pool's current generation number. -- maxWireVersion: The ``maxWireVersion`` of the connection the error occurs - on, for example ``9``. Added to support testing the behavior of "not writable primary" - errors on <4.2 and >=4.2 servers. -- when: A string describing when this mock error should occur. Supported - values are: - - - "beforeHandshakeCompletes": Simulate this mock error as if it occurred - during a new connection's handshake for an application operation. - - "afterHandshakeCompletes": Simulate this mock error as if it occurred - on an established connection for an application operation (i.e. after - the connection pool check out succeeds). - -- type: The type of error to mock. Supported values are: - - - "command": A command error. Always accompanied with a "response". - - "network": A non-timeout network error. - - "timeout": A network timeout error. - -- response: (optional) A command error response, for example - ``{ok: 0, errmsg: "not primary"}``. Present if and only if ``type`` is - "command". Note the server only returns "not primary" if the "hello" command - has been run on this connection. Otherwise the legacy error message is returned. - -In non-monitoring tests, an "outcome" represents the correct -TopologyDescription that results from processing the responses in the phases -so far. It has the following keys: - -- topologyType: A string like "ReplicaSetNoPrimary". -- setName: A string with the expected replica set name, or null. -- servers: An object whose keys are addresses like "a:27017", and whose values - are "server" objects. -- logicalSessionTimeoutMinutes: null or an integer. -- maxSetVersion: absent or an integer. -- maxElectionId: absent or a BSON ObjectId. -- compatible: absent or a bool. - -A "server" object represents a correct ServerDescription within the client's -current TopologyDescription. It has the following keys: - -- type: A ServerType name, like "RSSecondary". See `ServerType <../server-discovery-and-monitoring.rst#servertype>`_ for details pertaining to async and multi-threaded drivers. -- setName: A string with the expected replica set name, or null. -- setVersion: absent or an integer. -- electionId: absent, null, or an ObjectId. -- logicalSessionTimeoutMinutes: absent, null, or an integer. -- minWireVersion: absent or an integer. -- maxWireVersion: absent or an integer. -- topologyVersion: absent, null, or a topologyVersion document. -- pool: (optional) A "pool" object. 
- -A "pool" object represents a correct connection pool for a given server. -It has the following keys: - -- generation: This server's expected pool generation, like ``0``. - -In monitoring tests, an "outcome" contains a list of SDAM events that should -have been published by the client as a result of processing hello or legacy hello -responses in the current phase. Any SDAM events published by the client during its -construction (that is, prior to processing any of the responses) should be -combined with the events published during processing of hello or legacy hello -responses of the first phase of the test. A test MAY explicitly verify events -published during client construction by providing an empty responses array for the -first phase. - - -Use as unittests ----------------- - -Mocking -~~~~~~~ - -Drivers should be able to test their server discovery and monitoring logic without -any network I/O, by parsing hello (or legacy hello) and application error from the -test file and passing them into the driver code. Parts of the client and -monitoring code may need to be mocked or subclassed to achieve this. -`A reference implementation for PyMongo 3.10.1 is available here -`_. - -Initialization -~~~~~~~~~~~~~~ - -For each file, create a fresh client object initialized with the file's "uri". - -All files in the "single" directory include a connection string with one host -and no "replicaSet" option. -Set the client's initial TopologyType to Single, however that is achieved using the client's API. -(The spec says "The user MUST be able to set the initial TopologyType to Single" -without specifying how.) - -All files in the "sharded" directory include a connection string with multiple hosts -and no "replicaSet" option. -Set the client's initial TopologyType to Unknown or Sharded, depending on the client's API. - -All files in the "rs" directory include a connection string with a "replicaSet" option. -Set the client's initial TopologyType to ReplicaSetNoPrimary. -(For most clients, parsing a connection string with a "replicaSet" option -automatically sets the TopologyType to ReplicaSetNoPrimary.) - -Set up a listener to collect SDAM events published by the client, including -events published during client construction. - -Test Phases -~~~~~~~~~~~ - -For each phase in the file: - -#. Parse the "responses" array. Pass in the responses in order to the driver - code. If a response is the empty object ``{}``, simulate a network error. - -#. Parse the "applicationErrors" array. For each element, simulate the given - error as if it occurred while running an application operation. Note that - it is sufficient to construct a mock error and call the procedure which - updates the topology, e.g. - ``topology.handleApplicationError(address, generation, maxWireVersion, error)``. - -For non-monitoring tests, -once all responses are processed, assert that the phase's "outcome" object -is equivalent to the driver's current TopologyDescription. - -For monitoring tests, once all responses are processed, assert that the -events collected so far by the SDAM event listener are equivalent to the -events specified in the phase. - -Some fields such as "logicalSessionTimeoutMinutes", "compatible", and -"topologyVersion" were added later and haven't been added to all test files. -If these fields are present, test that they are equivalent to the fields of -the driver's current TopologyDescription or ServerDescription. - -For monitoring tests, clear the list of events collected so far. 
- -Continue until all phases have been executed. - -Integration Tests ------------------ - -Integration tests are provided in the "unified" directory and are -written in the `Unified Test Format -<../unified-test-format/unified-test-format.rst>`_. - -Prose Tests ------------ - -The following prose tests cannot be represented as spec tests and MUST be -implemented. - -Streaming protocol Tests -~~~~~~~~~~~~~~~~~~~~~~~~ - -Drivers that implement the streaming protocol (multi-threaded or -asynchronous drivers) must implement the following tests. Each test should be -run against a standalone, replica set, and sharded cluster unless otherwise -noted. - -Some of these cases should already be tested with the old protocol; in -that case just verify the test cases succeed with the new protocol. - -1. Configure the client with heartbeatFrequencyMS set to 500, - overriding the default of 10000. Assert the client processes - hello and legacy hello replies more frequently (approximately every 500ms). - -RTT Tests -~~~~~~~~~ - -Run the following test(s) on MongoDB 4.4+. - -1. Test that RTT is continuously updated. - - #. Create a client with ``heartbeatFrequencyMS=500``, - ``appName=streamingRttTest``, and subscribe to server events. - - #. Run a find command to wait for the server to be discovered. - - #. Sleep for 2 seconds. This must be long enough for multiple heartbeats - to succeed. - - #. Assert that each ``ServerDescriptionChangedEvent`` includes a non-zero - RTT. - - #. Configure the following failpoint to block hello or legacy hello commands - for 250ms which should add extra latency to each RTT check:: - - db.adminCommand({ - configureFailPoint: "failCommand", - mode: {times: 1000}, - data: { - failCommands: ["hello"], // or the legacy hello command - blockConnection: true, - blockTimeMS: 500, - appName: "streamingRttTest", - }, - }); - - #. Wait for the server's RTT to exceed 250ms. Eventually the average RTT - should also exceed 500ms but we use 250ms to speed up the test. Note - that the `Server Description Equality`_ rule means that - ServerDescriptionChangedEvents will not be published. This test may - need to use a driver specific helper to obtain the latest RTT instead. - If the RTT does not exceed 250ms after 10 seconds, consider the test - failed. - - #. Disable the failpoint:: - - db.adminCommand({ - configureFailPoint: "failCommand", - mode: "off", - }); - -.. Section for links. - -.. _Server Description Equality: /source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#server-description-equality diff --git a/test/spec/server-discovery-and-monitoring/monitoring/README.rst b/test/spec/server-discovery-and-monitoring/monitoring/README.rst deleted file mode 100644 index 7c741544ec2..00000000000 --- a/test/spec/server-discovery-and-monitoring/monitoring/README.rst +++ /dev/null @@ -1,12 +0,0 @@ -===================== -SDAM Monitoring Tests -===================== - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the SDAM Monitoring spec. - -Format ------- - -The format of the tests follows the standard SDAM test and should be able to leverage -the existing test runner in each language for the SDAM tests. 
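As one way to exercise the RTT prose test above outside the spec files, a driver-level check might look roughly like the following sketch, shown here with the Node.js driver and assuming its ``serverDescriptionChanged`` event and the ``roundTripTime`` field on server descriptions; the helper name and timings are illustrative only:

.. code:: typescript

    import { MongoClient, ServerDescriptionChangedEvent } from 'mongodb';

    // Observe RTT updates while heartbeats run every 500ms (steps 1-4 of the RTT prose test).
    async function assertNonZeroRtt(uri: string): Promise<void> {
      const client = new MongoClient(uri, { heartbeatFrequencyMS: 500, appName: 'streamingRttTest' });
      const observedRtts: number[] = [];
      client.on('serverDescriptionChanged', (event: ServerDescriptionChangedEvent) => {
        observedRtts.push(event.newDescription.roundTripTime);
      });

      await client.connect();
      await client.db('test').collection('test').findOne({}); // run a find to discover the server
      await new Promise(resolve => setTimeout(resolve, 2000)); // long enough for multiple heartbeats

      if (observedRtts.length === 0 || observedRtts.some(rtt => rtt <= 0)) {
        throw new Error('expected every ServerDescriptionChangedEvent to carry a non-zero RTT');
      }
      await client.close();
    }

As noted above, once only the RTT is changing no further ``ServerDescriptionChangedEvent`` may be published, so the later steps of the test may still need a driver-specific helper to read the latest RTT.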
diff --git a/test/spec/server-selection/README.rst b/test/spec/server-selection/README.rst deleted file mode 100644 index 62a6abce37c..00000000000 --- a/test/spec/server-selection/README.rst +++ /dev/null @@ -1,73 +0,0 @@ -====================== -Server Selection Tests -====================== - -This directory contains platform-independent tests that drivers can use -to prove their conformance to the Server Selection spec. The tests -are provided in both YAML and JSON formats, and drivers may test against -whichever format is more convenient for them. - -Version -------- - -Specifications have no version scheme. -They are not tied to a MongoDB server version, -and it is our intention that each specification moves from "draft" to "final" -with no further versions; it is superseded by a future spec, not revised. - -However, implementers must have stable sets of tests to target. -As test files evolve they will be occasionally tagged like -"server-selection-tests-2015-01-04", until the spec is final. - -Test Format and Use -------------------- - -There are two types of tests for the server selection spec, tests for -round trip time (RTT) calculation, and tests for server selection logic. - -Drivers should be able to test their server selection logic -without any network I/O, by parsing topology descriptions and read preference -documents from the test files and passing them into driver code. Parts of the -server selection code may need to be mocked or subclassed to achieve this. - -RTT Calculation Tests ->>>>>>>>>>>>>>>>>>>>> - -These YAML files contain the following keys: - -- ``avg_rtt_ms``: a server's previous average RTT, in milliseconds -- ``new_rtt_ms``: a new RTT value for this server, in milliseconds -- ``new_avg_rtt``: this server's newly-calculated average RTT, in milliseconds - -For each file, create a server description object initialized with ``avg_rtt_ms``. -Parse ``new_rtt_ms``, and ensure that the new RTT value for the mocked server -description is equal to ``new_avg_rtt``. - -If driver architecture doesn't easily allow construction of server description -objects in isolation, unit testing the EWMA algorithm using these inputs -and expected outputs is acceptable. - -Server Selection Logic Tests ->>>>>>>>>>>>>>>>>>>>>>>>>>>> - -These YAML files contain the following setup for each test: - -- ``topology_description``: the state of a mocked cluster -- ``operation``: the kind of operation to perform, either read or write -- ``read_preference``: a read preference document - -For each file, create a new TopologyDescription object initialized with the values -from ``topology_description``. Create a ReadPreference object initialized with the -values from ``read_preference``. - -Together with "operation", pass the newly-created TopologyDescription and ReadPreference -to server selection, and ensure that it selects the correct subset of servers from -the TopologyDescription. Each YAML file contains a key for these stages of server selection: - -- ``suitable_servers``: the set of servers in topology_description that are suitable, as - per the Server Selection spec, given operation and read_preference -- ``in_latency_window``: the set of suitable_servers that fall within the latency window - -Drivers implementing server selection MUST test that their implementation -correctly returns the set of servers in ``in_latency_window``. Drivers SHOULD also test -against ``suitable_servers`` if possible. 
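The RTT calculation tests above reduce to checking an exponentially weighted moving average. A rough sketch of such a check, assuming the Server Selection spec's weighting factor of 0.2 (an assumption stated here, not quoted from this README), with field names mirroring the YAML keys:

.. code:: typescript

    const ALPHA = 0.2; // assumed EWMA weighting factor

    interface RttTestCase {
      avg_rtt_ms: number | null; // null models "no previous average"
      new_rtt_ms: number;
      new_avg_rtt: number;
    }

    function updateAverageRtt(previousAvg: number | null, newRtt: number): number {
      // The first sample becomes the average; afterwards apply the EWMA.
      return previousAvg === null ? newRtt : ALPHA * newRtt + (1 - ALPHA) * previousAvg;
    }

    function checkRttTestCase(testCase: RttTestCase): void {
      const actual = updateAverageRtt(testCase.avg_rtt_ms, testCase.new_rtt_ms);
      if (Math.abs(actual - testCase.new_avg_rtt) > 1e-6) {
        throw new Error(`expected ${testCase.new_avg_rtt}, got ${actual}`);
      }
    }

    // Example: a prior average of 10ms and a new sample of 20ms yields 12ms.
    checkRttTestCase({ avg_rtt_ms: 10, new_rtt_ms: 20, new_avg_rtt: 12 });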
diff --git a/test/spec/sessions/README.rst b/test/spec/sessions/README.rst deleted file mode 100644 index d9bd9d00493..00000000000 --- a/test/spec/sessions/README.rst +++ /dev/null @@ -1,46 +0,0 @@ -==================== -Driver Session Tests -==================== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in this directory are platform-independent tests -meant to exercise a driver's implementation of sessions. These tests utilize the -`Unified Test Format <../../unified-test-format/unified-test-format.rst>`__. - -Several prose tests, which are not easily expressed in YAML, are also presented -in the Driver Sessions Spec. Those tests will need to be manually implemented -by each driver. - -Snapshot session tests -====================== -Snapshot sessions tests require server of version 5.0 or higher and -replica set or a sharded cluster deployment. -Default snapshot history window on the server is 5 minutes. Running the test in debug mode, or in any other slow configuration -may lead to `SnapshotTooOld` errors. Drivers can work around this issue by increasing the server's `minSnapshotHistoryWindowInSeconds` parameter, for example: - -.. code:: python - - client.admin.command('setParameter', 1, minSnapshotHistoryWindowInSeconds=60) - -Prose tests -``````````` - -1. Setting both ``snapshot`` and ``causalConsistency`` to true is not allowed -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -* ``client.startSession(snapshot = true, causalConsistency = true)`` -* Assert that an error was raised by driver - -Changelog -========= - -:2019-05-15: Initial version. -:2021-06-15: Added snapshot-session tests. Introduced legacy and unified folders. -:2021-07-30: Use numbering for prose test -:2022-02-11: Convert legacy tests to unified format diff --git a/test/spec/transactions/README.rst b/test/spec/transactions/README.rst deleted file mode 100644 index a1b27dcf013..00000000000 --- a/test/spec/transactions/README.rst +++ /dev/null @@ -1,663 +0,0 @@ -================== -Transactions Tests -================== - -.. contents:: - ----- - -Introduction -============ - -The YAML and JSON files in the ``legacy`` and ``unified`` sub-directories are -platform-independent tests that drivers can use to prove their conformance to -the Transactions Spec. The tests in the ``legacy`` directory are designed with -the intention of sharing some test-runner code with the CRUD Spec tests and the -Command Monitoring Spec tests. The format for these tests and instructions for -executing them are provided in the following sections. Tests in the -``unified`` directory are written using the `Unified Test Format -<../../unified-test-format/unified-test-format.rst>`_. - -Several prose tests, which are not easily expressed in YAML, are also presented -in this file. Those tests will need to be manually implemented by each driver. - -Server Fail Point -================= - -failCommand -``````````` - -Some tests depend on a server fail point, expressed in the ``failPoint`` field. -For example the ``failCommand`` fail point allows the client to force the -server to return an error. Keep in mind that the fail point only triggers for -commands listed in the "failCommands" field. See `SERVER-35004`_ and -`SERVER-35083`_ for more information. - -.. _SERVER-35004: https://jira.mongodb.org/browse/SERVER-35004 -.. 
_SERVER-35083: https://jira.mongodb.org/browse/SERVER-35083
-
-The ``failCommand`` fail point may be configured like so::
-
-    db.adminCommand({
-        configureFailPoint: "failCommand",
-        mode: <string|document>,
-        data: {
-          failCommands: ["commandName", "commandName2"],
-          closeConnection: <true|false>,
-          errorCode: <Number>,
-          writeConcernError: <document>,
-          appName: <string>,
-          blockConnection: <true|false>,
-          blockTimeMS: <Number>,
-        }
-    });
-
-``mode`` is a generic fail point option and may be assigned a string or document
-value. The string values ``"alwaysOn"`` and ``"off"`` may be used to enable or
-disable the fail point, respectively. A document may be used to specify either
-``times`` or ``skip``, which are mutually exclusive:
-
-- ``{ times: <integer> }`` may be used to limit the number of times the fail
-  point may trigger before transitioning to ``"off"``.
-- ``{ skip: <integer> }`` may be used to defer the first trigger of a fail
-  point, after which it will transition to ``"alwaysOn"``.
-
-The ``data`` option is a document that may be used to specify options that
-control the fail point's behavior. ``failCommand`` supports the following
-``data`` options, which may be combined if desired:
-
-- ``failCommands``: Required, the list of command names to fail.
-- ``closeConnection``: Boolean option, which defaults to ``false``. If
-  ``true``, the command will not be executed, the connection will be closed, and
-  the client will see a network error.
-- ``errorCode``: Integer option, which is unset by default. If set, the command
-  will not be executed and the specified command error code will be returned as
-  a command error.
-- ``appName``: A string to filter which MongoClient should be affected by
-  the failpoint. `New in mongod 4.4.0-rc2 `_.
-- ``blockConnection``: Whether the server should block the affected commands.
-  Default false.
-- ``blockTimeMS``: The number of milliseconds the affected commands should be
-  blocked for. Required when blockConnection is true.
-  `New in mongod 4.3.4 `_.
-
-Speeding Up Tests
-=================
-
-See `Speeding Up Tests <../../retryable-reads/tests/README.rst#speeding-up-tests>`_ in the retryable reads spec tests.
-
-Test Format
-===========
-
-Each YAML file has the following keys:
-
-- ``runOn`` (optional): An array of server version and/or topology requirements
-  for which the tests can be run. If the test environment satisfies one or more
-  of these requirements, the tests may be executed; otherwise, this file should
-  be skipped. If this field is omitted, the tests can be assumed to have no
-  particular requirements and should be executed. Each element will have some or
-  all of the following fields:
-
-  - ``minServerVersion`` (optional): The minimum server version (inclusive)
-    required to successfully run the tests. If this field is omitted, it should
-    be assumed that there is no lower bound on the required server version.
-
-  - ``maxServerVersion`` (optional): The maximum server version (inclusive)
-    against which the tests can be run successfully. If this field is omitted,
-    it should be assumed that there is no upper bound on the required server
-    version.
-
-  - ``topology`` (optional): An array of server topologies against which the
-    tests can be run successfully. Valid topologies are "single", "replicaset",
-    and "sharded". If this field is omitted, the default is all topologies (i.e.
-    ``["single", "replicaset", "sharded"]``).
-
-  - ``serverless``: Optional string. Whether or not the test should be run on
-    serverless instances imitating sharded clusters. Valid values are "require",
-    "forbid", and "allow".
If "require", the test MUST only be run on serverless - instances. If "forbid", the test MUST NOT be run on serverless instances. If - omitted or "allow", this option has no effect. - - The test runner MUST be informed whether or not serverless is being used in - order to determine if this requirement is met (e.g. through an environment - variable or configuration option). Since the serverless proxy imitates a - mongos, the runner is not capable of determining this by issuing a server - command such as ``buildInfo`` or ``hello``. - -- ``database_name`` and ``collection_name``: The database and collection to use - for testing. - -- ``data``: The data that should exist in the collection under test before each - test run. - -- ``tests``: An array of tests that are to be run independently of each other. - Each test will have some or all of the following fields: - - - ``description``: The name of the test. - - - ``skipReason``: Optional, string describing why this test should be - skipped. - - - ``useMultipleMongoses`` (optional): If ``true``, the MongoClient for this - test should be initialized with multiple mongos seed addresses. If ``false`` - or omitted, only a single mongos address should be specified. This field has - no effect for non-sharded topologies. - - - ``clientOptions``: Optional, parameters to pass to MongoClient(). - - - ``failPoint``: Optional, a server failpoint to enable expressed as the - configureFailPoint command to run on the admin database. This option and - ``useMultipleMongoses: true`` are mutually exclusive. - - - ``sessionOptions``: Optional, map of session names (e.g. "session0") to - parameters to pass to MongoClient.startSession() when creating that session. - - - ``operations``: Array of documents, each describing an operation to be - executed. Each document has the following fields: - - - ``name``: The name of the operation on ``object``. - - - ``object``: The name of the object to perform the operation on. Can be - "database", "collection", "session0", "session1", or "testRunner". See - the "targetedFailPoint" operation in `Special Test Operations`_. - - - ``collectionOptions``: Optional, parameters to pass to the Collection() - used for this operation. - - - ``databaseOptions``: Optional, parameters to pass to the Database() - used for this operation. - - - ``command_name``: Present only when ``name`` is "runCommand". The name - of the command to run. Required for languages that are unable preserve - the order keys in the "command" argument when parsing JSON/YAML. - - - ``arguments``: Optional, the names and values of arguments. - - - ``error``: Optional. If true, the test should expect an error or - exception. This could be a server-generated or a driver-generated error. - - - ``result``: The return value from the operation, if any. This field may - be a single document or an array of documents in the case of a - multi-document read. If the operation is expected to return an error, the - ``result`` is a single document that has one or more of the following - fields: - - - ``errorContains``: A substring of the expected error message. - - - ``errorCodeName``: The expected "codeName" field in the server - error response. - - - ``errorLabelsContain``: A list of error label strings that the - error is expected to have. - - - ``errorLabelsOmit``: A list of error label strings that the - error is expected not to have. - - - ``expectations``: Optional list of command-started events. 
- - - ``outcome``: Document describing the return value and/or expected state of - the collection after the operation is executed. Contains the following - fields: - - - ``collection``: - - - ``data``: The data that should exist in the collection after the - operations have run, sorted by "_id". - -Use as Integration Tests -======================== - -Run a MongoDB replica set with a primary, a secondary, and an arbiter, -**server version 4.0.0 or later**. (Including a secondary ensures that -server selection in a transaction works properly. Including an arbiter helps -ensure that no new bugs have been introduced related to arbiters.) - -A driver that implements support for sharded transactions MUST also run these -tests against a MongoDB sharded cluster with multiple mongoses and -**server version 4.2 or later**. Some tests require -initializing the MongoClient with multiple mongos seeds to ensures that mongos -transaction pinning and the recoveryToken works properly. - -Load each YAML (or JSON) file using a Canonical Extended JSON parser. - -Then for each element in ``tests``: - -#. If the ``skipReason`` field is present, skip this test completely. -#. Create a MongoClient and call - ``client.admin.runCommand({killAllSessions: []})`` to clean up any open - transactions from previous test failures. Ignore a command failure with - error code 11601 ("Interrupted") to work around `SERVER-38335`_. - - - Running ``killAllSessions`` cleans up any open transactions from - a previously failed test to prevent the current test from blocking. - It is sufficient to run this command once before starting the test suite - and once after each failed test. - - When testing against a sharded cluster run this command on ALL mongoses. - -#. Create a collection object from the MongoClient, using the ``database_name`` - and ``collection_name`` fields of the YAML file. -#. Drop the test collection, using writeConcern "majority". -#. Execute the "create" command to recreate the collection, using writeConcern - "majority". (Creating the collection inside a transaction is prohibited, so - create it explicitly.) -#. If the YAML file contains a ``data`` array, insert the documents in ``data`` - into the test collection, using writeConcern "majority". -#. When testing against a sharded cluster run a ``distinct`` command on the - newly created collection on all mongoses. For an explanation see, - `Why do tests that run distinct sometimes fail with StaleDbVersion?`_ -#. If ``failPoint`` is specified, its value is a configureFailPoint command. - Run the command on the admin database to enable the fail point. -#. Create a **new** MongoClient ``client``, with Command Monitoring listeners - enabled. (Using a new MongoClient for each test ensures a fresh session pool - that hasn't executed any transactions previously, so the tests can assert - actual txnNumbers, starting from 1.) Pass this test's ``clientOptions`` if - present. - - - When testing against a sharded cluster and ``useMultipleMongoses`` is - ``true`` the client MUST be created with multiple (valid) mongos seed - addreses. - -#. Call ``client.startSession`` twice to create ClientSession objects - ``session0`` and ``session1``, using the test's "sessionOptions" if they - are present. Save their lsids so they are available after calling - ``endSession``, see `Logical Session Id`_. -#. For each element in ``operations``: - - - If the operation ``name`` is a special test operation type, execute it and - go to the next operation, otherwise proceed to the next step. 
- - Enter a "try" block or your programming language's closest equivalent. - - Create a Database object from the MongoClient, using the ``database_name`` - field at the top level of the test file. - - Create a Collection object from the Database, using the - ``collection_name`` field at the top level of the test file. - If ``collectionOptions`` or ``databaseOptions`` is present, create the - Collection or Database object with the provided options, respectively. - Otherwise create the object with the default options. - - Execute the named method on the provided ``object``, passing the - arguments listed. Pass ``session0`` or ``session1`` to the method, - depending on which session's name is in the arguments list. - If ``arguments`` contains no "session", pass no explicit session to the - method. - - If the driver throws an exception / returns an error while executing this - series of operations, store the error message and server error code. - - If the operation's ``error`` field is ``true``, verify that the method - threw an exception or returned an error. - - If the result document has an "errorContains" field, verify that the - method threw an exception or returned an error, and that the value of the - "errorContains" field matches the error string. "errorContains" is a - substring (case-insensitive) of the actual error message. - - If the result document has an "errorCodeName" field, verify that the - method threw a command failed exception or returned an error, and that - the value of the "errorCodeName" field matches the "codeName" in the - server error response. - - If the result document has an "errorLabelsContain" field, verify that the - method threw an exception or returned an error. Verify that all of the - error labels in "errorLabelsContain" are present in the error or exception - using the ``hasErrorLabel`` method. - - If the result document has an "errorLabelsOmit" field, verify that the - method threw an exception or returned an error. Verify that none of the - error labels in "errorLabelsOmit" are present in the error or exception - using the ``hasErrorLabel`` method. - - If the operation returns a raw command response, eg from ``runCommand``, - then compare only the fields present in the expected result document. - Otherwise, compare the method's return value to ``result`` using the same - logic as the CRUD Spec Tests runner. - -#. Call ``session0.endSession()`` and ``session1.endSession``. -#. If the test includes a list of command-started events in ``expectations``, - compare them to the actual command-started events using the - same logic as the Command Monitoring Spec Tests runner, plus the rules in - the Command-Started Events instructions below. -#. If ``failPoint`` is specified, disable the fail point to avoid spurious - failures in subsequent tests. The fail point may be disabled like so:: - - db.adminCommand({ - configureFailPoint: , - mode: "off" - }); - -#. For each element in ``outcome``: - - - If ``name`` is "collection", verify that the test collection contains - exactly the documents in the ``data`` array. Ensure this find reads the - latest data by using **primary read preference** with - **local read concern** even when the MongoClient is configured with - another read preference or read concern. - Note the server does not guarantee that documents returned by a find - command will be in inserted order. This find MUST sort by ``{_id:1}``. - -.. 
_SERVER-38335: https://jira.mongodb.org/browse/SERVER-38335 - -Special Test Operations -``````````````````````` - -Certain operations that appear in the "operations" array do not correspond to -API methods but instead represent special test operations. Such operations are -defined on the "testRunner" object and documented here: - -targetedFailPoint -~~~~~~~~~~~~~~~~~ - -The "targetedFailPoint" operation instructs the test runner to configure a fail -point on a specific mongos. The mongos to run the ``configureFailPoint`` is -determined by the "session" argument (either "session0" or "session1"). -The session must already be pinned to a mongos server. The "failPoint" argument -is the ``configureFailPoint`` command to run. - -If a test uses ``targetedFailPoint``, disable the fail point after running -all ``operations`` to avoid spurious failures in subsequent tests. The fail -point may be disabled like so:: - - db.adminCommand({ - configureFailPoint: , - mode: "off" - }); - -Here is an example which instructs the test runner to enable the failCommand -fail point on the mongos server which "session0" is pinned to:: - - # Enable the fail point only on the Mongos that session0 is pinned to. - - name: targetedFailPoint - object: testRunner - arguments: - session: session0 - failPoint: - configureFailPoint: failCommand - mode: { times: 1 } - data: - failCommands: ["commitTransaction"] - closeConnection: true - -Tests that use the "targetedFailPoint" operation do not include -``configureFailPoint`` commands in their command expectations. Drivers MUST -ensure that ``configureFailPoint`` commands do not appear in the list of logged -commands, either by manually filtering it from the list of observed commands or -by using a different MongoClient to execute ``configureFailPoint``. - -assertSessionTransactionState -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The "assertSessionTransactionState" operation instructs the test runner to -assert that the transaction state of the given session is equal to the -specified value. The possible values are as follows: ``none``, ``starting``, -``in_progress``, ``committed``, ``aborted``:: - - - name: assertSessionTransactionState - object: testRunner - arguments: - session: session0 - state: in_progress - -assertSessionPinned -~~~~~~~~~~~~~~~~~~~ - -The "assertSessionPinned" operation instructs the test runner to assert that -the given session is pinned to a mongos:: - - - name: assertSessionPinned - object: testRunner - arguments: - session: session0 - -assertSessionUnpinned -~~~~~~~~~~~~~~~~~~~~~ - -The "assertSessionUnpinned" operation instructs the test runner to assert that -the given session is not pinned to a mongos:: - - - name: assertSessionPinned - object: testRunner - arguments: - session: session0 - -assertCollectionExists -~~~~~~~~~~~~~~~~~~~~~~ - -The "assertCollectionExists" operation instructs the test runner to assert that -the given collection exists in the database:: - - - name: assertCollectionExists - object: testRunner - arguments: - database: db - collection: test - -Use a ``listCollections`` command to check whether the collection exists. Note -that it is currently not possible to run ``listCollections`` from within a -transaction. 
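One way to implement the ``listCollections`` check described above, sketched with the Node.js driver; the ``collectionExists`` helper name is illustrative, not part of the spec:

.. code:: typescript

    import { MongoClient } from 'mongodb';

    // Supports assertCollectionExists / assertCollectionNotExists by asking the
    // server only about the collection in question rather than listing everything.
    async function collectionExists(
      client: MongoClient,
      dbName: string,
      collectionName: string
    ): Promise<boolean> {
      const matches = await client
        .db(dbName)
        .listCollections({ name: collectionName }, { nameOnly: true })
        .toArray();
      return matches.length > 0;
    }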
- -assertCollectionNotExists -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The "assertCollectionNotExists" operation instructs the test runner to assert -that the given collection does not exist in the database:: - - - name: assertCollectionNotExists - object: testRunner - arguments: - database: db - collection: test - -Use a ``listCollections`` command to check whether the collection exists. Note -that it is currently not possible to run ``listCollections`` from within a -transaction. - -assertIndexExists -~~~~~~~~~~~~~~~~~ - -The "assertIndexExists" operation instructs the test runner to assert that the -index with the given name exists on the collection:: - - - name: assertIndexExists - object: testRunner - arguments: - database: db - collection: test - index: t_1 - -Use a ``listIndexes`` command to check whether the index exists. Note that it is -currently not possible to run ``listIndexes`` from within a transaction. - -assertIndexNotExists -~~~~~~~~~~~~~~~~~~~~ - -The "assertIndexNotExists" operation instructs the test runner to assert that -the index with the given name does not exist on the collection:: - - - name: assertIndexNotExists - object: testRunner - arguments: - database: db - collection: test - index: t_1 - -Use a ``listIndexes`` command to check whether the index exists. Note that it is -currently not possible to run ``listIndexes`` from within a transaction. - -Command-Started Events -`````````````````````` - -The event listener used for these tests MUST ignore the security commands -listed in the Command Monitoring Spec. - -Logical Session Id -~~~~~~~~~~~~~~~~~~ - -Each command-started event in ``expectations`` includes an ``lsid`` with the -value "session0" or "session1". Tests MUST assert that the command's actual -``lsid`` matches the id of the correct ClientSession named ``session0`` or -``session1``. - -Null Values -~~~~~~~~~~~ - -Some command-started events in ``expectations`` include ``null`` values for -fields such as ``txnNumber``, ``autocommit``, and ``writeConcern``. -Tests MUST assert that the actual command **omits** any field that has a -``null`` value in the expected command. - -Cursor Id -^^^^^^^^^ - -A ``getMore`` value of ``"42"`` in a command-started event is a fake cursorId -that MUST be ignored. (In the Command Monitoring Spec tests, fake cursorIds are -correlated with real ones, but that is not necessary for Transactions Spec -tests.) - -afterClusterTime -^^^^^^^^^^^^^^^^ - -A ``readConcern.afterClusterTime`` value of ``42`` in a command-started event -is a fake cluster time. Drivers MUST assert that the actual command includes an -afterClusterTime. - -recoveryToken -^^^^^^^^^^^^^ - -A ``recoveryToken`` value of ``42`` in a command-started event is a -placeholder for an arbitrary recovery token. Drivers MUST assert that the -actual command includes a "recoveryToken" field and SHOULD assert that field -is a BSON document. - -Mongos Pinning Prose Tests -========================== - -The following tests ensure that a ClientSession is properly unpinned after -a sharded transaction. Initialize these tests with a MongoClient connected -to multiple mongoses. - -These tests use a cursor's address field to track which server an operation -was run on. If this is not possible in your driver, use command monitoring -instead. - -#. Test that starting a new transaction on a pinned ClientSession unpins the - session and normal server selection is performed for the next operation. - - .. 
code:: python - - @require_server_version(4, 1, 6) - @require_mongos_count_at_least(2) - def test_unpin_for_next_transaction(self): - # Increase localThresholdMS and wait until both nodes are discovered - # to avoid false positives. - client = MongoClient(mongos_hosts, localThresholdMS=1000) - wait_until(lambda: len(client.nodes) > 1) - # Create the collection. - client.test.test.insert_one({}) - with client.start_session() as s: - # Session is pinned to Mongos. - with s.start_transaction(): - client.test.test.insert_one({}, session=s) - - addresses = set() - for _ in range(50): - with s.start_transaction(): - cursor = client.test.test.find({}, session=s) - assert next(cursor) - addresses.add(cursor.address) - - assert len(addresses) > 1 - -#. Test non-transaction operations using a pinned ClientSession unpins the - session and normal server selection is performed. - - .. code:: python - - @require_server_version(4, 1, 6) - @require_mongos_count_at_least(2) - def test_unpin_for_non_transaction_operation(self): - # Increase localThresholdMS and wait until both nodes are discovered - # to avoid false positives. - client = MongoClient(mongos_hosts, localThresholdMS=1000) - wait_until(lambda: len(client.nodes) > 1) - # Create the collection. - client.test.test.insert_one({}) - with client.start_session() as s: - # Session is pinned to Mongos. - with s.start_transaction(): - client.test.test.insert_one({}, session=s) - - addresses = set() - for _ in range(50): - cursor = client.test.test.find({}, session=s) - assert next(cursor) - addresses.add(cursor.address) - - assert len(addresses) > 1 - -Q & A -===== - -Why do some tests appear to hang for 60 seconds on a sharded cluster? -````````````````````````````````````````````````````````````````````` - -There are two cases where this can happen. When the initial commitTransaction -attempt fails on mongos A and is retried on mongos B, mongos B will block -waiting for the transaction to complete. However because the initial commit -attempt failed, the command will only complete after the transaction is -automatically aborted for exceeding the shard's -transactionLifetimeLimitSeconds setting. `SERVER-39726`_ requests that -recovering the outcome of an uncommitted transaction should immediately abort -the transaction. - -The second case is when a *single-shard* transaction is committed successfully -on mongos A and then explicitly committed again on mongos B. Mongos B will also -block until the transactionLifetimeLimitSeconds timeout is hit at which point -``{ok:1}`` will be returned. `SERVER-39349`_ requests that recovering the -outcome of a completed single-shard transaction should not block. -Note that this test suite only includes single shard transactions. - -To workaround these issues, drivers SHOULD decrease the transaction timeout -setting by running setParameter **on each shard**. Setting the timeout to 3 -seconds significantly speeds up the test suite without a high risk of -prematurely timing out any tests' transactions. To decrease the timeout, run:: - - db.adminCommand( { setParameter: 1, transactionLifetimeLimitSeconds: 3 } ) - -Note that mongo-orchestration >=0.6.13 automatically sets this timeout to 3 -seconds so drivers using mongo-orchestration do not need to run these commands -manually. - -.. _SERVER-39726: https://jira.mongodb.org/browse/SERVER-39726 - -.. _SERVER-39349: https://jira.mongodb.org/browse/SERVER-39349 - -Why do tests that run distinct sometimes fail with StaleDbVersion? 
-`````````````````````````````````````````````````````````````````` - -When a shard receives its first command that contains a dbVersion, the shard -returns a StaleDbVersion error and the Mongos retries the operation. In a -sharded transaction, Mongos does not retry these operations and instead returns -the error to the client. For example:: - - Command distinct failed: Transaction aa09e296-472a-494f-8334-48d57ab530b6:1 was aborted on statement 0 due to: an error from cluster data placement change :: caused by :: got stale databaseVersion response from shard sh01 at host localhost:27217 :: caused by :: don't know dbVersion. - -To workaround this limitation, a driver test runner MUST run a -non-transactional ``distinct`` command on each Mongos before running any test -that uses ``distinct``. To ease the implementation drivers can simply run -``distinct`` before *every* test. - -Note that drivers can remove this workaround once `SERVER-39704`_ is resolved -so that mongos retries this operation transparently. The ``distinct`` command -is the only command allowed in a sharded transaction that uses the -``dbVersion`` concept so it is the only command affected. - -.. _SERVER-39704: https://jira.mongodb.org/browse/SERVER-39704 - -Changelog -========= - -:2019-05-15: Add operation level ``error`` field to assert any error. -:2019-03-25: Add workaround for StaleDbVersion on distinct. -:2019-03-01: Add top-level ``runOn`` field to denote server version and/or - topology requirements requirements for the test file. Removes the - ``topology`` top-level field, which is now expressed within - ``runOn`` elements. -:2019-02-28: ``useMultipleMongoses: true`` and non-targeted fail points are - mutually exclusive. -:2019-02-13: Modify test format for 4.2 sharded transactions, including - "useMultipleMongoses", ``object: testRunner``, the - ``targetedFailPoint`` operation, and recoveryToken assertions. diff --git a/test/spec/uri-options/README.rst b/test/spec/uri-options/README.rst deleted file mode 100644 index f6a128bba93..00000000000 --- a/test/spec/uri-options/README.rst +++ /dev/null @@ -1,54 +0,0 @@ -======================= -URI Options Tests -======================= - -The YAML and JSON files in this directory tree are platform-independent tests -that drivers can use to prove their conformance to the URI Options spec. - -These tests use the same format as the Connection String spec tests. - -Version -------- - -Files in the "specifications" repository have no version scheme. They are not -tied to a MongoDB server version. - -Format ------- - -Each YAML file contains an object with a single ``tests`` key. This key is an -array of test case objects, each of which have the following keys: - -- ``description``: A string describing the test. -- ``uri``: A string containing the URI to be parsed. -- ``valid``: A boolean indicating if the URI should be considered valid. -- ``warning``: A boolean indicating whether URI parsing should emit a warning. -- ``hosts``: Included for compatibility with the Connection String spec tests. This will always be ``~``. -- ``auth``: Included for compatibility with the Connection String spec tests. This will always be ``~``. -- ``options``: An object containing key/value pairs for each parsed query string - option. - -If a test case includes a null value for one of these keys (e.g. ``auth: ~``, -``hosts: ~``), no assertion is necessary. 
This both simplifies parsing of the -test files (keys should always exist) and allows flexibility for drivers that -might substitute default values *during* parsing (e.g. omitted ``hosts`` could be -parsed as ``["localhost"]``). - -The ``valid`` and ``warning`` fields are boolean in order to keep the tests -flexible. We are not concerned with asserting the format of specific error or -warnings messages strings. - -Under normal circumstances, it should not be necessary to specify both -``valid: false`` and ``warning: true``. Typically, a URI test case will either -yield an error (e.g. options conflict) or a warning (e.g. invalid type or value -for an option), but not both. - -Use as unit tests -================= - -Testing whether a URI is valid or not requires testing whether URI parsing (or -MongoClient construction) causes a warning due to a URI option being invalid and asserting that the -options parsed from the URI match those listed in the ``options`` field. - -Note that there are tests for each of the options marked as optional; drivers will need to implement -logic to skip over the optional tests that they don’t implement. diff --git a/test/spec/versioned-api/README.rst b/test/spec/versioned-api/README.rst deleted file mode 100644 index a0b0599f643..00000000000 --- a/test/spec/versioned-api/README.rst +++ /dev/null @@ -1,37 +0,0 @@ -=================== -Versioned API Tests -=================== - -.. contents:: - ----- - -Notes -===== - -This directory contains tests for the Versioned API specification. They are -implemented in the `Unified Test Format <../../unified-test-format/unified-test-format.rst>`__, -and require schema version 1.1. Note that to run these tests, the server must be -started with both ``enableTestCommands`` and ``acceptApiVersion2`` parameters -set to true. - -Testing with required API version -================================= - -Drivers MUST run their test suite against a cluster with the -``requireApiVersion`` parameter enabled and also requires authentication. - -To run this test, proceed as follows: -- Start a standalone mongod instance - -- Connect to the standalone instance and run the following command on the - ``admin`` database: ``{ setParameter: 1, requireApiVersion: true }`` - -- Declare an API version for the test run through the ``MONGODB_API_VERSION`` - environment variable. - -- If the environment variable is set, all clients created in tests MUST declare - the ``ServerApiVersion`` specified. - -No other topologies must be tested until ``mongo-orchestration`` can handle -servers with ``requireApiVersion`` enabled.
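As a rough illustration of the last requirement, a Node.js-driver test harness might construct its clients like the sketch below; reading ``MONGODB_API_VERSION`` directly and the fallback URI are assumptions about the harness, not requirements beyond declaring the specified ServerApiVersion:

.. code:: typescript

    import { MongoClient, MongoClientOptions, ServerApiVersion } from 'mongodb';

    // If MONGODB_API_VERSION is set, every client created by the test suite
    // declares that API version; otherwise clients are created normally.
    function makeTestClient(uri: string): MongoClient {
      const declaredVersion = process.env.MONGODB_API_VERSION;
      const options: MongoClientOptions = declaredVersion
        ? { serverApi: { version: declaredVersion as ServerApiVersion } }
        : {};
      return new MongoClient(uri, options);
    }

    const client = makeTestClient(process.env.MONGODB_URI ?? 'mongodb://localhost:27017');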