diff --git a/testdata/change-streams/change-streams-clusterTime.json b/testdata/change-streams/change-streams-clusterTime.json index 55b4ae3fbc..2b09e548f1 100644 --- a/testdata/change-streams/change-streams-clusterTime.json +++ b/testdata/change-streams/change-streams-clusterTime.json @@ -28,7 +28,6 @@ "minServerVersion": "4.0.0", "topologies": [ "replicaset", - "sharded-replicaset", "load-balanced", "sharded" ], diff --git a/testdata/change-streams/change-streams-clusterTime.yml b/testdata/change-streams/change-streams-clusterTime.yml index 997d4d5761..b1d9f20e01 100644 --- a/testdata/change-streams/change-streams-clusterTime.yml +++ b/testdata/change-streams/change-streams-clusterTime.yml @@ -15,7 +15,7 @@ createEntities: runOnRequirements: - minServerVersion: "4.0.0" - topologies: [ replicaset, sharded-replicaset, load-balanced, sharded ] + topologies: [ replicaset, load-balanced, sharded ] serverless: forbid initialData: diff --git a/testdata/change-streams/change-streams-disambiguatedPaths.json b/testdata/change-streams/change-streams-disambiguatedPaths.json index dba4a4c34a..e6cc5ef66e 100644 --- a/testdata/change-streams/change-streams-disambiguatedPaths.json +++ b/testdata/change-streams/change-streams-disambiguatedPaths.json @@ -28,7 +28,6 @@ "minServerVersion": "6.1.0", "topologies": [ "replicaset", - "sharded-replicaset", "load-balanced", "sharded" ], @@ -250,4 +249,3 @@ } ] } - diff --git a/testdata/change-streams/change-streams-disambiguatedPaths.yml b/testdata/change-streams/change-streams-disambiguatedPaths.yml index 2469988cd6..9ca9abf2e3 100644 --- a/testdata/change-streams/change-streams-disambiguatedPaths.yml +++ b/testdata/change-streams/change-streams-disambiguatedPaths.yml @@ -15,7 +15,7 @@ createEntities: runOnRequirements: - minServerVersion: "6.1.0" - topologies: [ replicaset, sharded-replicaset, load-balanced, sharded ] + topologies: [ replicaset, load-balanced, sharded ] serverless: forbid initialData: @@ -101,4 +101,3 @@ tests: removedFields: { $$exists: true } truncatedArrays: { $$exists: true } disambiguatedPaths: { 'a.0.1': ['a', { $$type: 'int' }, '1'] } - diff --git a/testdata/change-streams/change-streams-errors.json b/testdata/change-streams/change-streams-errors.json index 04fe8f04f3..65e99e541e 100644 --- a/testdata/change-streams/change-streams-errors.json +++ b/testdata/change-streams/change-streams-errors.json @@ -145,7 +145,7 @@ "minServerVersion": "4.1.11", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } @@ -190,7 +190,7 @@ "minServerVersion": "4.2", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } diff --git a/testdata/change-streams/change-streams-errors.yml b/testdata/change-streams/change-streams-errors.yml index 47ad1d07ab..85133dae0a 100644 --- a/testdata/change-streams/change-streams-errors.yml +++ b/testdata/change-streams/change-streams-errors.yml @@ -74,7 +74,7 @@ tests: - description: Change Stream should error when _id is projected out runOnRequirements: - minServerVersion: "4.1.11" - topologies: [ replicaset, sharded-replicaset, load-balanced ] + topologies: [ replicaset, sharded, load-balanced ] operations: - name: createChangeStream object: *collection0 @@ -93,7 +93,7 @@ tests: - description: change stream errors on ElectionInProgress runOnRequirements: - minServerVersion: "4.2" - topologies: [ replicaset, sharded-replicaset, load-balanced ] + topologies: [ replicaset, sharded, load-balanced ] operations: - name: failPoint object: testRunner diff --git 
a/testdata/change-streams/change-streams-pre_and_post_images.json b/testdata/change-streams/change-streams-pre_and_post_images.json index 8beefb2bc8..e62fc03459 100644 --- a/testdata/change-streams/change-streams-pre_and_post_images.json +++ b/testdata/change-streams/change-streams-pre_and_post_images.json @@ -6,7 +6,7 @@ "minServerVersion": "6.0.0", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ], "serverless": "forbid" diff --git a/testdata/change-streams/change-streams-pre_and_post_images.yml b/testdata/change-streams/change-streams-pre_and_post_images.yml index 2f6edd9820..6bc58eaf2d 100644 --- a/testdata/change-streams/change-streams-pre_and_post_images.yml +++ b/testdata/change-streams/change-streams-pre_and_post_images.yml @@ -4,7 +4,7 @@ schemaVersion: "1.4" runOnRequirements: - minServerVersion: "6.0.0" - topologies: [ replicaset, sharded-replicaset, load-balanced ] + topologies: [ replicaset, sharded, load-balanced ] serverless: forbid createEntities: diff --git a/testdata/change-streams/change-streams-resume-allowlist.json b/testdata/change-streams/change-streams-resume-allowlist.json index b4953ec736..1ec72b432b 100644 --- a/testdata/change-streams/change-streams-resume-allowlist.json +++ b/testdata/change-streams/change-streams-resume-allowlist.json @@ -6,7 +6,7 @@ "minServerVersion": "3.6", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ], "serverless": "forbid" diff --git a/testdata/change-streams/change-streams-resume-allowlist.yml b/testdata/change-streams/change-streams-resume-allowlist.yml index 5c69e23010..c5b7a874d3 100644 --- a/testdata/change-streams/change-streams-resume-allowlist.yml +++ b/testdata/change-streams/change-streams-resume-allowlist.yml @@ -5,7 +5,7 @@ schemaVersion: "1.7" runOnRequirements: - minServerVersion: "3.6" - topologies: [ replicaset, sharded-replicaset, load-balanced ] + topologies: [ replicaset, sharded, load-balanced ] serverless: forbid createEntities: diff --git a/testdata/change-streams/change-streams-resume-errorLabels.json b/testdata/change-streams/change-streams-resume-errorLabels.json index ff44b51427..7fd70108f0 100644 --- a/testdata/change-streams/change-streams-resume-errorLabels.json +++ b/testdata/change-streams/change-streams-resume-errorLabels.json @@ -6,7 +6,7 @@ "minServerVersion": "4.3.1", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ], "serverless": "forbid" @@ -1479,10 +1479,10 @@ { "description": "change stream resumes after StaleShardVersion", "runOnRequirements": [ - { - "maxServerVersion": "6.0.99" - } - ], + { + "maxServerVersion": "6.0.99" + } + ], "operations": [ { "name": "failPoint", diff --git a/testdata/change-streams/change-streams-resume-errorLabels.yml b/testdata/change-streams/change-streams-resume-errorLabels.yml index c9ce290d66..5cc6d423a4 100644 --- a/testdata/change-streams/change-streams-resume-errorLabels.yml +++ b/testdata/change-streams/change-streams-resume-errorLabels.yml @@ -5,7 +5,7 @@ schemaVersion: "1.7" runOnRequirements: - minServerVersion: "4.3.1" - topologies: [ replicaset, sharded-replicaset, load-balanced ] + topologies: [ replicaset, sharded, load-balanced ] serverless: forbid createEntities: @@ -744,8 +744,8 @@ tests: - description: change stream resumes after StaleShardVersion runOnRequirements: - # StaleShardVersion is obsolete as of 6.1 and is no longer marked as resumable. 
- - maxServerVersion: "6.0.99" + # StaleShardVersion is obsolete as of 6.1 and is no longer marked as resumable. + - maxServerVersion: "6.0.99" operations: - name: failPoint object: testRunner diff --git a/testdata/change-streams/change-streams-showExpandedEvents.json b/testdata/change-streams/change-streams-showExpandedEvents.json index 3eed2f534a..b9594e0c1e 100644 --- a/testdata/change-streams/change-streams-showExpandedEvents.json +++ b/testdata/change-streams/change-streams-showExpandedEvents.json @@ -6,9 +6,9 @@ "minServerVersion": "6.0.0", "topologies": [ "replicaset", - "sharded-replicaset", "sharded" - ] + ], + "serverless": "forbid" } ], "createEntities": [ @@ -462,7 +462,6 @@ "runOnRequirements": [ { "topologies": [ - "sharded-replicaset", "sharded" ] } diff --git a/testdata/change-streams/change-streams-showExpandedEvents.yml b/testdata/change-streams/change-streams-showExpandedEvents.yml index c0443d0e22..e6289047bf 100644 --- a/testdata/change-streams/change-streams-showExpandedEvents.yml +++ b/testdata/change-streams/change-streams-showExpandedEvents.yml @@ -2,7 +2,8 @@ description: "change-streams-showExpandedEvents" schemaVersion: "1.7" runOnRequirements: - minServerVersion: "6.0.0" - topologies: [ replicaset, sharded-replicaset, sharded ] + topologies: [ replicaset, sharded ] + serverless: forbid createEntities: - client: id: &client0 client0 @@ -275,7 +276,8 @@ tests: - description: "when showExpandedEvents is true, shardCollection events are reported" runOnRequirements: - - topologies: [ sharded-replicaset, sharded ] + # Note: minServerVersion is specified in top-level runOnRequirements + - topologies: [ sharded ] operations: - name: dropCollection object: *shardedDb diff --git a/testdata/client-side-operations-timeout/command-execution.json b/testdata/client-side-operations-timeout/command-execution.json index 013ebd212e..d4f9bb1bdc 100644 --- a/testdata/client-side-operations-timeout/command-execution.json +++ b/testdata/client-side-operations-timeout/command-execution.json @@ -7,7 +7,6 @@ "topologies": [ "single", "replicaset", - "sharded-replicaset", "sharded" ], "serverless": "forbid" diff --git a/testdata/client-side-operations-timeout/error-transformations.json b/testdata/client-side-operations-timeout/error-transformations.json index 6a603ff07f..4889e39583 100644 --- a/testdata/client-side-operations-timeout/error-transformations.json +++ b/testdata/client-side-operations-timeout/error-transformations.json @@ -11,7 +11,6 @@ { "minServerVersion": "4.2", "topologies": [ - "replicaset", "sharded" ] } @@ -27,7 +26,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ diff --git a/testdata/client-side-operations-timeout/error-transformations.yml b/testdata/client-side-operations-timeout/error-transformations.yml index 584d194e78..7bff4776a8 100644 --- a/testdata/client-side-operations-timeout/error-transformations.yml +++ b/testdata/client-side-operations-timeout/error-transformations.yml @@ -7,7 +7,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: ["replicaset"] - minServerVersion: "4.2" - topologies: ["replicaset", "sharded"] + topologies: ["sharded"] createEntities: - client: @@ -16,7 +16,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent diff --git a/testdata/client-side-operations-timeout/global-timeoutMS.json 
b/testdata/client-side-operations-timeout/global-timeoutMS.json index e2dd10480a..740bbad2e2 100644 --- a/testdata/client-side-operations-timeout/global-timeoutMS.json +++ b/testdata/client-side-operations-timeout/global-timeoutMS.json @@ -1,12 +1,12 @@ { "description": "timeoutMS can be configured on a MongoClient", - "schemaVersion": "1.5", + "schemaVersion": "1.9", "runOnRequirements": [ { "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -38,7 +38,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -71,14 +71,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -217,7 +217,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -250,14 +250,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -265,9 +265,6 @@ { "name": "listDatabaseNames", "object": "client", - "arguments": { - "filter": {} - }, "expectError": { "isTimeoutError": true } @@ -358,10 +355,7 @@ }, { "name": "listDatabaseNames", - "object": "client", - "arguments": { - "filter": {} - } + "object": "client" } ], "expectEvents": [ @@ -396,7 +390,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -429,14 +423,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "aggregate" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -575,7 +569,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -608,14 +602,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "aggregate" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -768,7 +762,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -801,14 +795,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "listCollections" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -947,7 +941,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -980,14 +974,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "listCollections" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -1126,7 +1120,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -1159,14 +1153,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "ping" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -1177,7 +1171,8 @@ "arguments": { "command": { "ping": 1 - } + }, + 
"commandName": "ping" }, "expectError": { "isTimeoutError": true @@ -1273,7 +1268,8 @@ "arguments": { "command": { "ping": 1 - } + }, + "commandName": "ping" } } ], @@ -1309,7 +1305,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -1342,14 +1338,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "aggregate" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -1488,7 +1484,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -1521,14 +1517,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "aggregate" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -1667,7 +1663,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -1700,14 +1696,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "count" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -1846,7 +1842,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -1879,14 +1875,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "aggregate" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -2025,7 +2021,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -2058,14 +2054,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "count" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -2198,7 +2194,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -2231,14 +2227,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "distinct" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -2379,7 +2375,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -2412,14 +2408,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "find" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -2558,7 +2554,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -2591,14 +2587,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "find" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -2737,7 +2733,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -2770,14 +2766,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "listIndexes" ], 
"blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -2910,7 +2906,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -2943,14 +2939,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -3083,7 +3079,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -3116,14 +3112,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "aggregate" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -3262,7 +3258,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -3295,14 +3291,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -3445,7 +3441,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -3478,14 +3474,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -3632,7 +3628,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -3665,14 +3661,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "delete" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -3811,7 +3807,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -3844,14 +3840,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "delete" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -3990,7 +3986,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -4023,14 +4019,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "update" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -4175,7 +4171,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -4208,14 +4204,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "update" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -4364,7 +4360,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -4397,14 +4393,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "update" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -4553,7 +4549,7 @@ "client": { 
"id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -4586,14 +4582,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -4732,7 +4728,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -4765,14 +4761,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -4917,7 +4913,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -4950,14 +4946,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -5106,7 +5102,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -5139,14 +5135,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -5301,7 +5297,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -5334,14 +5330,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "createIndexes" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -5486,7 +5482,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -5519,14 +5515,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "dropIndexes" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } @@ -5669,7 +5665,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 250 }, "useMultipleMongoses": false, "observeEvents": [ @@ -5702,14 +5698,14 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 1 + "times": 2 }, "data": { "failCommands": [ "dropIndexes" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 350 } } } diff --git a/testdata/client-side-operations-timeout/global-timeoutMS.yml b/testdata/client-side-operations-timeout/global-timeoutMS.yml index f2ce502190..989a63f086 100644 --- a/testdata/client-side-operations-timeout/global-timeoutMS.yml +++ b/testdata/client-side-operations-timeout/global-timeoutMS.yml @@ -2,11 +2,11 @@ description: "timeoutMS can be configured on a MongoClient" -schemaVersion: "1.5" +schemaVersion: "1.9" runOnRequirements: - minServerVersion: "4.4" - topologies: ["replicaset", "sharded-replicaset"] + topologies: ["replicaset", "sharded"] createEntities: - client: @@ -22,7 +22,7 @@ tests: # For each operation, we execute two tests: # # 1. timeoutMS can be configured to a non-zero value on a MongoClient and is inherited by the operation. 
Each test - # constructs a client entity with timeoutMS=50 and configures a fail point to block the operation for 60ms so + # constructs a client entity with timeoutMS=250 and configures a fail point to block the operation for 350ms so # execution results in a timeout error. # # 2. timeoutMS can be set to 0 for a MongoClient. Each test constructs a client entity with timeoutMS=0 and @@ -38,7 +38,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -56,11 +56,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["listDatabases"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: listDatabases object: *client arguments: @@ -135,7 +138,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -153,16 +156,17 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["listDatabases"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: listDatabaseNames object: *client - arguments: - filter: {} - + expectError: isTimeoutError: true expectEvents: @@ -210,9 +214,7 @@ tests: blockTimeMS: 15 - name: listDatabaseNames object: *client - arguments: - filter: {} - + expectEvents: - client: *client @@ -232,7 +234,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -250,11 +252,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: createChangeStream object: *client arguments: @@ -329,7 +334,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -347,11 +352,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: aggregate object: *database arguments: @@ -426,7 +434,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -444,11 +452,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["listCollections"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: listCollections object: *database arguments: @@ -523,7 +534,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -541,11 +552,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["listCollections"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: listCollectionNames object: *database arguments: @@ -620,7 +634,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -638,15 +652,19 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["ping"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: runCommand object: *database arguments: command: { ping: 1 } + commandName: ping expectError: isTimeoutError: true @@ -697,6 +715,7 @@ tests: object: *database arguments: command: { ping: 1 } + commandName: ping expectEvents: @@ -717,7 +736,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -735,11 +754,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: createChangeStream object: *database arguments: @@ -814,7 +836,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -832,11 +854,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: aggregate object: *collection arguments: @@ -911,7 +936,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -929,11 +954,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["count"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: count object: *collection arguments: @@ -1008,7 +1036,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -1026,11 +1054,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: countDocuments object: *collection arguments: @@ -1105,7 +1136,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -1123,11 +1154,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["count"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: estimatedDocumentCount object: *collection @@ -1198,7 +1232,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -1216,11 +1250,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["distinct"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: distinct object: *collection arguments: @@ -1297,7 +1334,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -1315,11 +1352,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } data: failCommands: ["find"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: find object: *collection arguments: @@ -1394,7 +1434,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -1412,11 +1452,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["find"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: findOne object: *collection arguments: @@ -1491,7 +1534,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -1509,11 +1552,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["listIndexes"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: listIndexes object: *collection @@ -1584,7 +1630,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -1602,11 +1648,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["listIndexes"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: listIndexNames object: *collection @@ -1677,7 +1726,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -1695,11 +1744,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: createChangeStream object: *collection arguments: @@ -1774,7 +1826,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -1792,11 +1844,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: insertOne object: *collection arguments: @@ -1871,7 +1926,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -1889,11 +1944,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: insertMany object: *collection arguments: @@ -1970,7 +2028,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -1988,11 +2046,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["delete"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: deleteOne object: *collection arguments: @@ -2067,7 +2128,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -2085,11 +2146,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["delete"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: deleteMany object: *collection arguments: @@ -2164,7 +2228,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -2182,11 +2246,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["update"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: replaceOne object: *collection arguments: @@ -2263,7 +2330,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -2281,11 +2348,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } data: failCommands: ["update"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: updateOne object: *collection arguments: @@ -2362,7 +2432,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -2380,11 +2450,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["update"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: updateMany object: *collection arguments: @@ -2461,7 +2534,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -2479,11 +2552,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["findAndModify"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: findOneAndDelete object: *collection arguments: @@ -2558,7 +2634,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -2576,11 +2652,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["findAndModify"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: findOneAndReplace object: *collection arguments: @@ -2657,7 +2736,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -2675,11 +2754,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["findAndModify"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: findOneAndUpdate object: *collection arguments: @@ -2756,7 +2838,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -2774,11 +2856,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: bulkWrite object: *collection arguments: @@ -2857,7 +2942,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -2875,11 +2960,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["createIndexes"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: createIndex object: *collection arguments: @@ -2956,7 +3044,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -2974,11 +3062,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } data: failCommands: ["dropIndexes"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: dropIndex object: *collection arguments: @@ -3055,7 +3146,7 @@ tests: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 250 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -3073,11 +3164,14 @@ tests: client: *failPointClient failPoint: configureFailPoint: failCommand - mode: { times: 1 } + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } data: failCommands: ["dropIndexes"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 350 - name: dropIndexes object: *collection @@ -3139,4 +3233,3 @@ tests: command: dropIndexes: *collectionName maxTimeMS: { $$exists: false } - diff --git a/testdata/client-side-operations-timeout/override-operation-timeoutMS.json b/testdata/client-side-operations-timeout/override-operation-timeoutMS.json index 896b996ee8..6fa0bd802a 100644 --- a/testdata/client-side-operations-timeout/override-operation-timeoutMS.json +++ b/testdata/client-side-operations-timeout/override-operation-timeoutMS.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/testdata/client-side-operations-timeout/override-operation-timeoutMS.yml b/testdata/client-side-operations-timeout/override-operation-timeoutMS.yml index 64785f8f6c..1091ec7cd5 100644 --- a/testdata/client-side-operations-timeout/override-operation-timeoutMS.yml +++ b/testdata/client-side-operations-timeout/override-operation-timeoutMS.yml @@ -6,7 +6,7 @@ schemaVersion: "1.9" runOnRequirements: - minServerVersion: "4.4" - topologies: ["replicaset", "sharded-replicaset"] + topologies: ["replicaset", "sharded"] createEntities: - client: diff --git a/testdata/client-side-operations-timeout/retryability-timeoutMS.json b/testdata/client-side-operations-timeout/retryability-timeoutMS.json index 438ba6b8d2..9daad260ef 100644 --- a/testdata/client-side-operations-timeout/retryability-timeoutMS.json +++ b/testdata/client-side-operations-timeout/retryability-timeoutMS.json @@ -11,8 +11,7 @@ { "minServerVersion": "4.2", "topologies": [ - "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -109,6 +108,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -137,7 +141,7 @@ "name": "insertOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "document": { "x": 1 } @@ -199,6 +203,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -328,6 +337,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -356,7 +370,7 @@ "name": "insertMany", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "documents": [ { "x": 1 @@ -420,6 +434,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -547,6 +566,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -575,7 +599,7 @@ "name": "deleteOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -635,6 +659,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": 
[ { "name": "failPoint", @@ -761,6 +790,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -789,7 +823,7 @@ "name": "replaceOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "replacement": { "x": 1 @@ -852,6 +886,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -983,6 +1022,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1011,7 +1055,7 @@ "name": "updateOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "update": { "$set": { @@ -1076,6 +1120,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1204,6 +1253,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1232,7 +1286,7 @@ "name": "findOneAndDelete", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -1292,6 +1346,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1418,6 +1477,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1446,7 +1510,7 @@ "name": "findOneAndReplace", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "replacement": { "x": 1 @@ -1509,6 +1573,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1640,6 +1709,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1668,7 +1742,7 @@ "name": "findOneAndUpdate", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "update": { "$set": { @@ -1733,6 +1807,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1869,6 +1948,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1897,7 +1981,7 @@ "name": "bulkWrite", "object": "collection", "arguments": 
{ - "timeoutMS": 500, + "timeoutMS": 1000, "requests": [ { "insertOne": { @@ -1965,6 +2049,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2096,6 +2185,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2124,7 +2218,7 @@ "name": "listDatabases", "object": "client", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -2184,6 +2278,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2304,6 +2403,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2332,7 +2436,7 @@ "name": "listDatabaseNames", "object": "client", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -2391,6 +2495,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2513,6 +2622,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2541,7 +2655,7 @@ "name": "createChangeStream", "object": "client", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -2601,6 +2715,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2731,6 +2850,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2759,7 +2883,7 @@ "name": "aggregate", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [ { "$listLocalSessions": {} @@ -2826,6 +2950,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2956,6 +3085,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2984,7 +3118,7 @@ "name": "listCollections", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -3044,6 +3178,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3167,6 +3306,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on 
database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3195,7 +3339,7 @@ "name": "listCollectionNames", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -3255,6 +3399,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3378,6 +3527,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3406,7 +3560,7 @@ "name": "createChangeStream", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -3466,6 +3620,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3589,6 +3748,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3617,7 +3781,7 @@ "name": "aggregate", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -3677,6 +3841,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3800,6 +3969,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3828,7 +4002,7 @@ "name": "count", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -3888,6 +4062,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4011,6 +4190,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4039,7 +4223,7 @@ "name": "countDocuments", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4099,6 +4283,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4219,6 +4408,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4247,7 +4441,7 @@ "name": "estimatedDocumentCount", "object": "collection", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -4306,6 +4500,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "runOnRequirements": [ + 
{ + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4429,6 +4628,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4457,7 +4661,7 @@ "name": "distinct", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "fieldName": "x", "filter": {} } @@ -4518,6 +4722,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4642,6 +4851,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4670,7 +4884,7 @@ "name": "find", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4730,6 +4944,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4853,6 +5072,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4881,7 +5105,7 @@ "name": "findOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4941,6 +5165,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5061,6 +5290,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5089,7 +5323,7 @@ "name": "listIndexes", "object": "collection", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -5148,6 +5382,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5270,6 +5509,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5298,7 +5542,7 @@ "name": "createChangeStream", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -5358,6 +5602,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", diff --git a/testdata/client-side-operations-timeout/retryability-timeoutMS.yml b/testdata/client-side-operations-timeout/retryability-timeoutMS.yml index 601f9b2947..b15d22a4ba 100644 --- a/testdata/client-side-operations-timeout/retryability-timeoutMS.yml +++ b/testdata/client-side-operations-timeout/retryability-timeoutMS.yml @@ -9,7 +9,7 @@ runOnRequirements: - minServerVersion: 
"4.0" topologies: ["replicaset"] - minServerVersion: "4.2" - topologies: ["replicaset", "sharded-replicaset"] + topologies: ["sharded"] createEntities: - client: @@ -84,6 +84,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -100,7 +102,7 @@ tests: - name: insertOne object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 document: { x: 1 } expectEvents: @@ -125,6 +127,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -191,6 +195,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -207,7 +213,7 @@ tests: - name: insertMany object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 documents: - { x: 1 } @@ -233,6 +239,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -299,6 +307,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -315,7 +325,7 @@ tests: - name: deleteOne object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} expectEvents: @@ -340,6 +350,8 @@ tests: delete: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -406,6 +418,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -422,7 +436,7 @@ tests: - name: replaceOne object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} replacement: { x: 1 } @@ -448,6 +462,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -515,6 +531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -531,7 +549,7 @@ tests: - name: updateOne object: 
*collection arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} update: { $set: { x: 1 } } @@ -557,6 +575,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -623,6 +643,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -639,7 +661,7 @@ tests: - name: findOneAndDelete object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} expectEvents: @@ -664,6 +686,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -730,6 +754,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -746,7 +772,7 @@ tests: - name: findOneAndReplace object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} replacement: { x: 1 } @@ -772,6 +798,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -839,6 +867,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -855,7 +885,7 @@ tests: - name: findOneAndUpdate object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} update: { $set: { x: 1 } } @@ -881,6 +911,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -949,6 +981,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -965,7 +999,7 @@ tests: - name: bulkWrite object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 requests: - insertOne: document: { _id: 1 } @@ -992,6 +1026,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint 
object: testRunner @@ -1059,6 +1095,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1075,7 +1113,7 @@ tests: - name: listDatabases object: *client arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} expectEvents: @@ -1100,6 +1138,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1163,6 +1203,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1179,7 +1221,7 @@ tests: - name: listDatabaseNames object: *client arguments: - timeoutMS: 500 + timeoutMS: 1000 expectEvents: - client: *client @@ -1203,6 +1245,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1267,6 +1311,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1283,7 +1329,7 @@ tests: - name: createChangeStream object: *client arguments: - timeoutMS: 500 + timeoutMS: 1000 pipeline: [] expectEvents: @@ -1308,6 +1354,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1373,6 +1421,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1389,7 +1439,7 @@ tests: - name: aggregate object: *database arguments: - timeoutMS: 500 + timeoutMS: 1000 pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] expectEvents: @@ -1414,6 +1464,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1479,6 +1531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1495,7 +1549,7 @@ tests: - name: listCollections object: *database arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} expectEvents: @@ 
-1520,6 +1574,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1585,6 +1641,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1601,7 +1659,7 @@ tests: - name: listCollectionNames object: *database arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} expectEvents: @@ -1626,6 +1684,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1691,6 +1751,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1707,7 +1769,7 @@ tests: - name: createChangeStream object: *database arguments: - timeoutMS: 500 + timeoutMS: 1000 pipeline: [] expectEvents: @@ -1732,6 +1794,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1797,6 +1861,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1813,7 +1879,7 @@ tests: - name: aggregate object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 pipeline: [] expectEvents: @@ -1838,6 +1904,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1903,6 +1971,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1919,7 +1989,7 @@ tests: - name: count object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} expectEvents: @@ -1944,6 +2014,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2009,6 +2081,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - countDocuments on 
collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2025,7 +2099,7 @@ tests: - name: countDocuments object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} expectEvents: @@ -2050,6 +2124,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2113,6 +2189,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2129,7 +2207,7 @@ tests: - name: estimatedDocumentCount object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 expectEvents: - client: *client @@ -2153,6 +2231,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2218,6 +2298,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2234,7 +2316,7 @@ tests: - name: distinct object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 fieldName: x filter: {} @@ -2260,6 +2342,8 @@ tests: distinct: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2326,6 +2410,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2342,7 +2428,7 @@ tests: - name: find object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} expectEvents: @@ -2367,6 +2453,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2432,6 +2520,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2448,7 +2538,7 @@ tests: - name: findOne object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 filter: {} expectEvents: @@ -2473,6 +2563,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOne on collection" + 
runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2536,6 +2628,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2552,7 +2646,7 @@ tests: - name: listIndexes object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 expectEvents: - client: *client @@ -2576,6 +2670,8 @@ tests: listIndexes: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2640,6 +2736,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2656,7 +2754,7 @@ tests: - name: createChangeStream object: *collection arguments: - timeoutMS: 500 + timeoutMS: 1000 pipeline: [] expectEvents: @@ -2681,6 +2779,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2721,4 +2821,3 @@ tests: command: aggregate: *collectionName maxTimeMS: { $$exists: false } - \ No newline at end of file diff --git a/testdata/retryable-writes/unified/bulkWrite-serverErrors.json b/testdata/retryable-writes/unified/bulkWrite-serverErrors.json index 23cf2869a6..0a063ab4d9 100644 --- a/testdata/retryable-writes/unified/bulkWrite-serverErrors.json +++ b/testdata/retryable-writes/unified/bulkWrite-serverErrors.json @@ -1,12 +1,19 @@ { "description": "retryable-writes bulkWrite serverErrors", - "schemaVersion": "1.0", + "schemaVersion": "1.3", "runOnRequirements": [ { - "minServerVersion": "3.6", + "minServerVersion": "4.0", "topologies": [ "replicaset" ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] } ], "createEntities": [ @@ -55,16 +62,7 @@ "description": "BulkWrite succeeds after retryable writeConcernError in first batch", "runOnRequirements": [ { - "minServerVersion": "4.0", - "topologies": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topologies": [ - "sharded-replicaset" - ] + "minServerVersion": "4.3.1" } ], "operations": [ @@ -200,6 +198,88 @@ ] } ] + }, + { + "description": "BulkWrite fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + 
"x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] } ] } diff --git a/testdata/retryable-writes/unified/bulkWrite-serverErrors.yml b/testdata/retryable-writes/unified/bulkWrite-serverErrors.yml index cb67304c74..a88a206123 100644 --- a/testdata/retryable-writes/unified/bulkWrite-serverErrors.yml +++ b/testdata/retryable-writes/unified/bulkWrite-serverErrors.yml @@ -1,10 +1,12 @@ description: "retryable-writes bulkWrite serverErrors" -schemaVersion: "1.0" +schemaVersion: "1.3" runOnRequirements: - - minServerVersion: "3.6" + - minServerVersion: "4.0" topologies: [ replicaset ] + - minServerVersion: "4.1.7" + topologies: [ sharded, load-balanced ] createEntities: - client: @@ -30,10 +32,7 @@ initialData: tests: - description: "BulkWrite succeeds after retryable writeConcernError in first batch" runOnRequirements: - - minServerVersion: "4.0" - topologies: [ replicaset ] - - minServerVersion: "4.1.7" - topologies: [ sharded-replicaset ] + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -94,3 +93,44 @@ tests: documents: - { _id: 1, x: 11 } - { _id: 3, x: 33 } # The write was still applied + - + description: 'BulkWrite fails with a RetryableWriteError label after two connection failures' + operations: + - + name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: [ update ] + closeConnection: true + - + object: *collection0 + name: bulkWrite + arguments: + requests: + - + deleteOne: + filter: { _id: 1 } + - + insertOne: + document: { _id: 3, x: 33 } + - + updateOne: + filter: { _id: 2 } + update: { $inc: { x: 1 } } + ordered: true + expectError: + isError: true + errorLabelsContain: + - RetryableWriteError + outcome: + - + collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } diff --git a/testdata/retryable-writes/unified/insertOne-serverErrors.json b/testdata/retryable-writes/unified/insertOne-serverErrors.json index 77245a8197..74d078377a 100644 --- a/testdata/retryable-writes/unified/insertOne-serverErrors.json +++ b/testdata/retryable-writes/unified/insertOne-serverErrors.json @@ -1,12 +1,19 @@ { "description": "retryable-writes insertOne serverErrors", - "schemaVersion": "1.0", + "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "3.6", + "minServerVersion": "4.0", "topologies": [ "replicaset" ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] } ], "createEntities": [ @@ -55,16 +62,7 @@ "description": "InsertOne succeeds after retryable writeConcernError", "runOnRequirements": [ { - "minServerVersion": "4.0", - "topologies": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topologies": [ - "sharded-replicaset" - ] + "minServerVersion": "4.3.1" } ], "operations": [ @@ -168,6 +166,607 @@ ] } ] + }, + { + "description": "RetryableWriteError label is added based on top-level code in pre-4.4 server response", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "RetryableWriteError label is added based on writeConcernError in pre-4.4 mongod response", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after connection failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne fails after connection failure when retryWrites option is false", + "operations": [ + { + "object": "testRunner", + "name": 
"createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + } + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertOne fails after Interrupted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601, + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertOne fails after WriteConcernError Interrupted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 11601, + "errmsg": "operation was interrupted" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne fails after WriteConcernError WriteConcernFailed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 64, + "codeName": "WriteConcernFailed", + "errmsg": "waiting for replication timed out", + "errInfo": { + "wtimeout": true + } + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + 
"documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] } ] } diff --git a/testdata/retryable-writes/unified/insertOne-serverErrors.yml b/testdata/retryable-writes/unified/insertOne-serverErrors.yml index 3d9672cdca..b69864d7ab 100644 --- a/testdata/retryable-writes/unified/insertOne-serverErrors.yml +++ b/testdata/retryable-writes/unified/insertOne-serverErrors.yml @@ -1,10 +1,12 @@ description: "retryable-writes insertOne serverErrors" -schemaVersion: "1.0" +schemaVersion: "1.9" runOnRequirements: - - minServerVersion: "3.6" + - minServerVersion: "4.0" topologies: [ replicaset ] + - minServerVersion: "4.1.7" + topologies: [ sharded, load-balanced ] createEntities: - client: @@ -30,10 +32,7 @@ initialData: tests: - description: "InsertOne succeeds after retryable writeConcernError" runOnRequirements: - - minServerVersion: "4.0" - topologies: [ replicaset ] - - minServerVersion: "4.1.7" - topologies: [ sharded-replicaset ] + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -76,3 +75,294 @@ tests: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } # The write was still applied + + - description: "RetryableWriteError label is added based on top-level code in pre-4.4 server response" + runOnRequirements: + - minServerVersion: "4.2" + maxServerVersion: "4.2.99" + topologies: [ replicaset, sharded ] + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + # Trigger the fail point twice to allow asserting the error label in + # the retry attempt's response. 
+ mode: { times: 2 } + data: + failCommands: [ "insert" ] + errorCode: 189 # PrimarySteppedDown + - name: insertOne + object: *collection0 + arguments: + document: { _id: 3, x: 33 } + expectError: + errorLabelsContain: [ "RetryableWriteError" ] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: &insertCommandStartedEvent + command: + insert: *collectionName + documents: [{ _id: 3, x: 33 }] + commandName: insert + databaseName: *databaseName + - commandStartedEvent: *insertCommandStartedEvent + outcome: + - collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + + - description: "RetryableWriteError label is added based on writeConcernError in pre-4.4 mongod response" + runOnRequirements: + - minServerVersion: "4.2" + maxServerVersion: "4.2.99" + topologies: [ replicaset ] + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + # Trigger the fail point twice to allow asserting the error label in + # the retry attempt's response. + mode: { times: 2 } + data: + failCommands: [ "insert" ] + writeConcernError: + code: 91 # ShutdownInProgress + errmsg: "Replication is being shut down" + - name: insertOne + object: *collection0 + arguments: + document: { _id: 3, x: 33 } + expectError: + errorLabelsContain: [ "RetryableWriteError" ] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: *insertCommandStartedEvent + - commandStartedEvent: *insertCommandStartedEvent + outcome: + - collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + # writeConcernError doesn't prevent the server from applying the write + - { _id: 3, x: 33 } + + - + description: 'InsertOne succeeds after connection failure' + operations: + - + name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ insert ] + closeConnection: true + - + object: *collection0 + name: insertOne + arguments: + document: { _id: 3, x: 33 } + expectResult: + $$unsetOrMatches: { insertedId: { $$unsetOrMatches: 3 } } + outcome: + - + collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - + description: 'InsertOne fails after connection failure when retryWrites option is false' + operations: + - + object: testRunner + name: createEntities + arguments: + entities: + - client: + id: &client1 client1 + useMultipleMongoses: false + uriOptions: + retryWrites: false + - database: + id: &database1 database1 + client: *client1 + databaseName: *databaseName + - collection: + id: &collection1 collection1 + database: *database1 + collectionName: *collectionName + - + name: failPoint + object: testRunner + arguments: + client: *client1 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ insert ] + closeConnection: true + - + object: *collection1 + name: insertOne + arguments: + document: { _id: 3, x: 33 } + expectError: + isError: true + # If retryWrites is false, the driver should not add the + # RetryableWriteError label to the error. 
+ errorLabelsOmit: + - RetryableWriteError + outcome: + - + collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - + description: 'InsertOne fails after Interrupted' + operations: + - + name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ insert ] + errorCode: 11601 + closeConnection: false + - + object: *collection0 + name: insertOne + arguments: + document: { _id: 3, x: 33 } + expectError: + isError: true + errorLabelsOmit: + - RetryableWriteError + outcome: + - + collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - + description: 'InsertOne fails after WriteConcernError Interrupted' + operations: + - + name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ insert ] + writeConcernError: + code: 11601 + errmsg: 'operation was interrupted' + - + object: *collection0 + name: insertOne + arguments: + document: { _id: 3, x: 33 } + expectError: + isError: true + errorLabelsOmit: + - RetryableWriteError + outcome: + - + collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } # The write was still applied. + - + description: 'InsertOne fails after WriteConcernError WriteConcernFailed' + operations: + - + name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ insert ] + writeConcernError: + code: 64 + codeName: WriteConcernFailed + errmsg: 'waiting for replication timed out' + errInfo: + wtimeout: true + - + object: *collection0 + name: insertOne + arguments: + document: { _id: 3, x: 33 } + expectError: + isError: true + errorLabelsOmit: + - RetryableWriteError + outcome: + - + collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } # The write was still applied. 
+ - + description: 'InsertOne fails with a RetryableWriteError label after two connection failures' + operations: + - + name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: [ insert ] + closeConnection: true + - + object: *collection0 + name: insertOne + arguments: + document: { _id: 3, x: 33 } + expectError: + isError: true + errorLabelsContain: + - RetryableWriteError + outcome: + - + collectionName: *collectionName + databaseName: *databaseName + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } diff --git a/testdata/run-command/runCommand.json b/testdata/run-command/runCommand.json index 007e514bd7..fde9de92e6 100644 --- a/testdata/run-command/runCommand.json +++ b/testdata/run-command/runCommand.json @@ -229,7 +229,6 @@ { "topologies": [ "replicaset", - "sharded-replicaset", "load-balanced", "sharded" ] @@ -493,7 +492,7 @@ { "minServerVersion": "4.2", "topologies": [ - "sharded-replicaset", + "sharded", "load-balanced" ] } diff --git a/testdata/run-command/runCommand.yml b/testdata/run-command/runCommand.yml index eaa12eff23..bc55d79205 100644 --- a/testdata/run-command/runCommand.yml +++ b/testdata/run-command/runCommand.yml @@ -119,7 +119,7 @@ tests: - description: attaches the provided $readPreference to given command runOnRequirements: # Exclude single topology, which is most likely a standalone server - - topologies: [ replicaset, sharded-replicaset, load-balanced, sharded ] + - topologies: [ replicaset, load-balanced, sharded ] operations: - name: runCommand object: *db @@ -143,7 +143,7 @@ tests: # This test assumes that the single topology contains a standalone server; # however, it is possible for a single topology to contain a direct # connection to another server type. 
- # See: https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst#topology-type-single + # See: https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.md#topology-type-single - topologies: [ single ] operations: - name: runCommand @@ -250,7 +250,7 @@ tests: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.2" - topologies: [ sharded-replicaset, load-balanced ] + topologies: [ sharded, load-balanced ] operations: - name: withTransaction object: *session diff --git a/testdata/sessions/driver-sessions-dirty-session-errors.json b/testdata/sessions/driver-sessions-dirty-session-errors.json index 361ea83d7b..6aa1da1df5 100644 --- a/testdata/sessions/driver-sessions-dirty-session-errors.json +++ b/testdata/sessions/driver-sessions-dirty-session-errors.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/testdata/sessions/driver-sessions-dirty-session-errors.yml b/testdata/sessions/driver-sessions-dirty-session-errors.yml index 4ef612a783..eb7978660d 100644 --- a/testdata/sessions/driver-sessions-dirty-session-errors.yml +++ b/testdata/sessions/driver-sessions-dirty-session-errors.yml @@ -6,8 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - # Tests depend on retryable writes, which require a replica set - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/testdata/sessions/snapshot-sessions-unsupported-ops.json b/testdata/sessions/snapshot-sessions-unsupported-ops.json index 1021b7f264..c41f74d337 100644 --- a/testdata/sessions/snapshot-sessions-unsupported-ops.json +++ b/testdata/sessions/snapshot-sessions-unsupported-ops.json @@ -6,7 +6,7 @@ "minServerVersion": "5.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/testdata/sessions/snapshot-sessions-unsupported-ops.yml b/testdata/sessions/snapshot-sessions-unsupported-ops.yml index 1d5dce8933..91d785e47e 100644 --- a/testdata/sessions/snapshot-sessions-unsupported-ops.yml +++ b/testdata/sessions/snapshot-sessions-unsupported-ops.yml @@ -4,7 +4,7 @@ schemaVersion: "1.0" runOnRequirements: - minServerVersion: "5.0" - topologies: [replicaset, sharded-replicaset] + topologies: [replicaset, sharded] createEntities: - client: diff --git a/testdata/sessions/snapshot-sessions.json b/testdata/sessions/snapshot-sessions.json index 75b577b039..260f8b6f48 100644 --- a/testdata/sessions/snapshot-sessions.json +++ b/testdata/sessions/snapshot-sessions.json @@ -6,7 +6,7 @@ "minServerVersion": "5.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/testdata/sessions/snapshot-sessions.yml b/testdata/sessions/snapshot-sessions.yml index 04ffe9cd63..bcf0f7eec6 100644 --- a/testdata/sessions/snapshot-sessions.yml +++ b/testdata/sessions/snapshot-sessions.yml @@ -4,7 +4,7 @@ schemaVersion: "1.0" runOnRequirements: - minServerVersion: "5.0" - topologies: [replicaset, sharded-replicaset] + topologies: [replicaset, sharded] createEntities: - client: @@ -378,7 +378,7 @@ tests: fieldName: x filter: {} session: session0 - expectResult: [ 11 ] + expectResult: [ 11 ] expectEvents: - client: client0 events: diff --git a/testdata/transactions/unified/mongos-unpin.json b/testdata/transactions/unified/mongos-unpin.json index 4bb8548f48..4d1ebc87bc 100644 --- a/testdata/transactions/unified/mongos-unpin.json +++ 
b/testdata/transactions/unified/mongos-unpin.json @@ -4,9 +4,8 @@ "runOnRequirements": [ { "minServerVersion": "4.2", - "maxServerVersion": "7.99", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -53,7 +52,10 @@ "description": "unpin after TransientTransactionError error on commit", "runOnRequirements": [ { - "serverless": "forbid" + "serverless": "forbid", + "topologies": [ + "sharded" + ] } ], "operations": [ @@ -164,7 +166,10 @@ "description": "unpin after non-transient error on abort", "runOnRequirements": [ { - "serverless": "forbid" + "serverless": "forbid", + "topologies": [ + "sharded" + ] } ], "operations": [ @@ -234,6 +239,13 @@ }, { "description": "unpin after TransientTransactionError error on abort", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], "operations": [ { "name": "startTransaction", diff --git a/testdata/transactions/unified/mongos-unpin.yml b/testdata/transactions/unified/mongos-unpin.yml index 59a7895f44..44844a1858 100644 --- a/testdata/transactions/unified/mongos-unpin.yml +++ b/testdata/transactions/unified/mongos-unpin.yml @@ -4,9 +4,7 @@ schemaVersion: '1.4' runOnRequirements: - minServerVersion: '4.2' - # Skip pending GODRIVER-3113 - maxServerVersion: "7.99" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: @@ -37,9 +35,13 @@ _yamlAnchors: tests: - description: unpin after TransientTransactionError error on commit runOnRequirements: + - # serverless proxy doesn't append error labels to errors in transactions # caused by failpoints (CLOUDP-88216) - - serverless: "forbid" + serverless: "forbid" + # Note: test utilizes targetedFailPoint, which is incompatible with + # load-balanced and useMultipleMongoses:true + topologies: [ sharded ] operations: - &startTransaction name: startTransaction @@ -89,9 +91,13 @@ tests: - description: unpin after non-transient error on abort runOnRequirements: + - # serverless proxy doesn't append error labels to errors in transactions # caused by failpoints (CLOUDP-88216) - - serverless: "forbid" + serverless: "forbid" + # Note: test utilizes targetedFailPoint, which is incompatible with + # load-balanced and useMultipleMongoses:true + topologies: [ sharded ] operations: - *startTransaction - *insertOne @@ -114,6 +120,11 @@ tests: - *abortTransaction - description: unpin after TransientTransactionError error on abort + runOnRequirements: + - + # Note: test utilizes targetedFailPoint, which is incompatible with + # load-balanced and useMultipleMongoses:true + topologies: [ sharded ] operations: - *startTransaction - *insertOne diff --git a/testdata/unified-test-format/valid-pass/poc-crud.json b/testdata/unified-test-format/valid-pass/poc-crud.json index 0790d9b789..94e4ec5682 100644 --- a/testdata/unified-test-format/valid-pass/poc-crud.json +++ b/testdata/unified-test-format/valid-pass/poc-crud.json @@ -322,7 +322,7 @@ "minServerVersion": "4.1.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ], "serverless": "forbid" } diff --git a/testdata/unified-test-format/valid-pass/poc-crud.yml b/testdata/unified-test-format/valid-pass/poc-crud.yml index b7d05d75af..5748c0779f 100644 --- a/testdata/unified-test-format/valid-pass/poc-crud.yml +++ b/testdata/unified-test-format/valid-pass/poc-crud.yml @@ -143,7 +143,7 @@ tests: - description: "readConcern majority with out stage" runOnRequirements: - minServerVersion: "4.1.0" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] serverless: "forbid" operations: - name: 
aggregate
diff --git a/testdata/unified-test-format/valid-pass/poc-retryable-writes.json b/testdata/unified-test-format/valid-pass/poc-retryable-writes.json
index 50160799f3..f19aa3f9d8 100644
--- a/testdata/unified-test-format/valid-pass/poc-retryable-writes.json
+++ b/testdata/unified-test-format/valid-pass/poc-retryable-writes.json
@@ -1,14 +1,6 @@
 {
   "description": "poc-retryable-writes",
   "schemaVersion": "1.0",
-  "runOnRequirements": [
-    {
-      "minServerVersion": "3.6",
-      "topologies": [
-        "replicaset"
-      ]
-    }
-  ],
   "createEntities": [
     {
       "client": {
@@ -79,6 +71,14 @@
   "tests": [
     {
       "description": "FindOneAndUpdate is committed on first attempt",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "3.6",
+          "topologies": [
+            "replicaset"
+          ]
+        }
+      ],
       "operations": [
         {
           "name": "failPoint",
@@ -132,6 +132,14 @@
     },
     {
       "description": "FindOneAndUpdate is not committed on first attempt",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "3.6",
+          "topologies": [
+            "replicaset"
+          ]
+        }
+      ],
       "operations": [
         {
           "name": "failPoint",
@@ -188,6 +196,14 @@
     },
     {
       "description": "FindOneAndUpdate is never committed",
+      "runOnRequirements": [
+        {
+          "minServerVersion": "3.6",
+          "topologies": [
+            "replicaset"
+          ]
+        }
+      ],
       "operations": [
         {
           "name": "failPoint",
@@ -245,15 +261,10 @@
       "description": "InsertMany succeeds after PrimarySteppedDown",
       "runOnRequirements": [
         {
-          "minServerVersion": "4.0",
-          "topologies": [
-            "replicaset"
-          ]
-        },
-        {
-          "minServerVersion": "4.1.7",
+          "minServerVersion": "4.3.1",
           "topologies": [
-            "sharded-replicaset"
+            "replicaset",
+            "sharded"
           ]
         }
       ],
@@ -345,7 +356,7 @@
         {
           "minServerVersion": "4.1.7",
           "topologies": [
-            "sharded-replicaset"
+            "sharded"
           ]
         }
       ],
@@ -406,15 +417,10 @@
       "description": "InsertOne fails after multiple retryable writeConcernErrors",
       "runOnRequirements": [
         {
-          "minServerVersion": "4.0",
-          "topologies": [
-            "replicaset"
-          ]
-        },
-        {
-          "minServerVersion": "4.1.7",
+          "minServerVersion": "4.3.1",
           "topologies": [
-            "sharded-replicaset"
+            "replicaset",
+            "sharded"
           ]
         }
       ],
@@ -433,6 +439,9 @@
                 "failCommands": [
                   "insert"
                 ],
+                "errorLabels": [
+                  "RetryableWriteError"
+                ],
                 "writeConcernError": {
                   "code": 91,
                   "errmsg": "Replication is being shut down"
diff --git a/testdata/unified-test-format/valid-pass/poc-retryable-writes.yml b/testdata/unified-test-format/valid-pass/poc-retryable-writes.yml
index fa882e2836..c11e8b6ef7 100644
--- a/testdata/unified-test-format/valid-pass/poc-retryable-writes.yml
+++ b/testdata/unified-test-format/valid-pass/poc-retryable-writes.yml
@@ -2,10 +2,6 @@ description: "poc-retryable-writes"
 
 schemaVersion: "1.0"
 
-runOnRequirements:
-  - minServerVersion: "3.6"
-    topologies: [ replicaset ]
-
 createEntities:
   - client:
       id: &client0 client0
@@ -42,6 +38,9 @@ initialData:
 
 tests:
   - description: "FindOneAndUpdate is committed on first attempt"
+    runOnRequirements: &onPrimaryTransactionalWrite_requirements
+      - minServerVersion: "3.6"
+        topologies: [ replicaset ]
     operations:
       - name: failPoint
         object: testRunner
@@ -65,6 +64,7 @@ tests:
           - { _id: 2, x: 22 }
 
   - description: "FindOneAndUpdate is not committed on first attempt"
+    runOnRequirements: *onPrimaryTransactionalWrite_requirements
     operations:
       - name: failPoint
         object: testRunner
@@ -89,6 +89,7 @@ tests:
           - { _id: 2, x: 22 }
 
   - description: "FindOneAndUpdate is never committed"
+    runOnRequirements: *onPrimaryTransactionalWrite_requirements
     operations:
      - name: failPoint
        object: testRunner
@@ -113,13 +114,9 @@ tests:
           - { _id: 2, x: 22 }
 
   - description: "InsertMany succeeds after PrimarySteppedDown"
-    runOnRequirements: &failCommand_requirements
-      - minServerVersion: "4.0"
-        topologies: [ replicaset ]
-      - minServerVersion: "4.1.7"
-        # Original test uses "sharded", but retryable writes requires a sharded
-        # cluster backed by replica sets
-        topologies: [ sharded-replicaset ]
+    runOnRequirements:
+      - minServerVersion: "4.3.1" # failCommand errorLabels option
+        topologies: [ replicaset, sharded ]
     operations:
       - name: failPoint
         object: testRunner
@@ -131,7 +128,7 @@ tests:
             data:
               failCommands: [ insert ]
               errorCode: 189 # PrimarySteppedDown
-              errorLabels: [ RetryableWriteError ]
+              errorLabels: [ RetryableWriteError ]
       - name: insertMany
         object: *collection0
         arguments:
@@ -153,7 +150,11 @@ tests:
           - { _id: 4, x: 44 }
 
   - description: "InsertOne fails after connection failure when retryWrites option is false"
-    runOnRequirements: *failCommand_requirements
+    runOnRequirements: # failCommand
+      - minServerVersion: "4.0"
+        topologies: [ replicaset ]
+      - minServerVersion: "4.1.7"
+        topologies: [ sharded ]
     operations:
       - name: failPoint
         object: testRunner
@@ -181,7 +182,9 @@ tests:
           - { _id: 2, x: 22 }
 
   - description: "InsertOne fails after multiple retryable writeConcernErrors"
-    runOnRequirements: *failCommand_requirements
+    runOnRequirements:
+      - minServerVersion: "4.3.1" # failCommand errorLabels option
+        topologies: [ replicaset, sharded ]
     operations:
      - name: failPoint
        object: testRunner
@@ -192,6 +195,7 @@ tests:
             mode: { times: 2 }
             data:
               failCommands: [ insert ]
+              errorLabels: [ RetryableWriteError ]
               writeConcernError:
                 code: 91 # ShutdownInProgress
                 errmsg: "Replication is being shut down"
diff --git a/testdata/unified-test-format/valid-pass/poc-sessions.json b/testdata/unified-test-format/valid-pass/poc-sessions.json
index 75f3489428..117c9e7d00 100644
--- a/testdata/unified-test-format/valid-pass/poc-sessions.json
+++ b/testdata/unified-test-format/valid-pass/poc-sessions.json
@@ -264,7 +264,7 @@
         {
           "minServerVersion": "4.1.8",
           "topologies": [
-            "sharded-replicaset"
+            "sharded"
           ]
         }
       ],
diff --git a/testdata/unified-test-format/valid-pass/poc-sessions.yml b/testdata/unified-test-format/valid-pass/poc-sessions.yml
index a7efd8a6d6..ae4fb1fa92 100644
--- a/testdata/unified-test-format/valid-pass/poc-sessions.yml
+++ b/testdata/unified-test-format/valid-pass/poc-sessions.yml
@@ -124,12 +124,11 @@ tests:
 
   - description: "Dirty explicit session is discarded"
     # Original test specified retryWrites=true, but that is now the default.
-    # Retryable writes will require a sharded-replicaset, though.
     runOnRequirements:
       - minServerVersion: "4.0"
         topologies: [ replicaset ]
       - minServerVersion: "4.1.8"
-        topologies: [ sharded-replicaset ]
+        topologies: [ sharded ]
     operations:
       - name: failPoint
         object: testRunner
diff --git a/testdata/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/testdata/unified-test-format/valid-pass/poc-transactions-convenient-api.json
index 820ed65927..9ab44a9c54 100644
--- a/testdata/unified-test-format/valid-pass/poc-transactions-convenient-api.json
+++ b/testdata/unified-test-format/valid-pass/poc-transactions-convenient-api.json
@@ -11,7 +11,7 @@
     {
       "minServerVersion": "4.1.8",
       "topologies": [
-        "sharded-replicaset"
+        "sharded"
       ]
     }
   ],
diff --git a/testdata/unified-test-format/valid-pass/poc-transactions-convenient-api.yml b/testdata/unified-test-format/valid-pass/poc-transactions-convenient-api.yml
index 4f981d15dd..94fadda0aa 100644
--- a/testdata/unified-test-format/valid-pass/poc-transactions-convenient-api.yml
+++ b/testdata/unified-test-format/valid-pass/poc-transactions-convenient-api.yml
@@ -6,7 +6,7 @@ runOnRequirements:
   - minServerVersion: "4.0"
     topologies: [ replicaset ]
   - minServerVersion: "4.1.8"
-    topologies: [ sharded-replicaset ]
+    topologies: [ sharded ]
 
 createEntities:
   - client:
diff --git a/testdata/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/testdata/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json
index a0b297d59a..de08edec44 100644
--- a/testdata/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json
+++ b/testdata/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json
@@ -5,7 +5,7 @@
     {
       "minServerVersion": "4.1.8",
       "topologies": [
-        "sharded-replicaset"
+        "sharded"
       ]
     }
   ],
diff --git a/testdata/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml b/testdata/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml
index 47db7c3188..33cd2a2521 100644
--- a/testdata/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml
+++ b/testdata/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml
@@ -4,7 +4,7 @@ schemaVersion: "1.0"
 
 runOnRequirements:
   - minServerVersion: "4.1.8"
-    topologies: [ sharded-replicaset ]
+    topologies: [ sharded ]
 
 createEntities:
   - client:
diff --git a/testdata/unified-test-format/valid-pass/poc-transactions.json b/testdata/unified-test-format/valid-pass/poc-transactions.json
index 0355ca2060..2055a3b705 100644
--- a/testdata/unified-test-format/valid-pass/poc-transactions.json
+++ b/testdata/unified-test-format/valid-pass/poc-transactions.json
@@ -11,7 +11,7 @@
     {
       "minServerVersion": "4.1.8",
       "topologies": [
-        "sharded-replicaset"
+        "sharded"
      ]
    }
  ],
@@ -93,7 +93,7 @@
       "minServerVersion": "4.3.4",
       "topologies": [
         "replicaset",
-        "sharded-replicaset"
+        "sharded"
       ]
     }
   ],
@@ -203,7 +203,7 @@
       "minServerVersion": "4.3.4",
       "topologies": [
         "replicaset",
-        "sharded-replicaset"
+        "sharded"
       ]
     }
   ],
diff --git a/testdata/unified-test-format/valid-pass/poc-transactions.yml b/testdata/unified-test-format/valid-pass/poc-transactions.yml
index 0a66b9bd7f..8a12c8b39a 100644
--- a/testdata/unified-test-format/valid-pass/poc-transactions.yml
+++ b/testdata/unified-test-format/valid-pass/poc-transactions.yml
@@ -6,7 +6,7 @@ runOnRequirements:
   - minServerVersion: "4.0"
     topologies: [ replicaset ]
   - minServerVersion: "4.1.8"
-    topologies: [ sharded-replicaset ]
+    topologies: [ sharded ]
 
 createEntities:
   - client:
@@ -51,7 +51,7 @@ tests:
   - description: "explicitly create collection using create command"
     runOnRequirements:
       - minServerVersion: "4.3.4"
-        topologies: [ replicaset, sharded-replicaset ]
+        topologies: [ replicaset, sharded ]
     operations:
       - name: dropCollection
         object: *database0
@@ -109,7 +109,7 @@ tests:
   - description: "create index on a non-existing collection"
     runOnRequirements:
       - minServerVersion: "4.3.4"
-        topologies: [ replicaset, sharded-replicaset ]
+        topologies: [ replicaset, sharded ]
     operations:
       - name: dropCollection
         object: *database0
diff --git a/testdata/versioned-api/transaction-handling.json b/testdata/versioned-api/transaction-handling.json
index 5c627bb351..32031296af 100644
--- a/testdata/versioned-api/transaction-handling.json
+++ b/testdata/versioned-api/transaction-handling.json
@@ -1,12 +1,12 @@
 {
   "description": "Transaction handling",
-  "schemaVersion": "1.1",
+  "schemaVersion": "1.3",
   "runOnRequirements": [
     {
       "minServerVersion": "4.9",
       "topologies": [
         "replicaset",
-        "sharded-replicaset",
+        "sharded",
         "load-balanced"
       ]
     }
   ],
@@ -92,7 +92,7 @@
     {
       "topologies": [
         "replicaset",
-        "sharded-replicaset",
+        "sharded",
         "load-balanced"
       ]
     }
   ],
@@ -221,7 +221,7 @@
     {
       "topologies": [
         "replicaset",
-        "sharded-replicaset",
+        "sharded",
         "load-balanced"
       ]
     }
   ],
diff --git a/testdata/versioned-api/transaction-handling.yml b/testdata/versioned-api/transaction-handling.yml
index 5f723c0737..bc7cc318dc 100644
--- a/testdata/versioned-api/transaction-handling.yml
+++ b/testdata/versioned-api/transaction-handling.yml
@@ -1,10 +1,10 @@
 description: "Transaction handling"
 
-schemaVersion: "1.1"
+schemaVersion: "1.3"
 
 runOnRequirements:
   - minServerVersion: "4.9"
-    topologies: [ replicaset, sharded-replicaset, load-balanced ]
+    topologies: [ replicaset, sharded, load-balanced ]
 
 createEntities:
   - client:
@@ -46,7 +46,7 @@ initialData:
 tests:
   - description: "All commands in a transaction declare an API version"
     runOnRequirements:
-      - topologies: [ replicaset, sharded-replicaset, load-balanced ]
+      - topologies: [ replicaset, sharded, load-balanced ]
     operations:
       - name: startTransaction
         object: *session
@@ -87,7 +87,7 @@ tests:
                 <<: *expectedApiVersion
   - description: "abortTransaction includes an API version"
     runOnRequirements:
-      - topologies: [ replicaset, sharded-replicaset, load-balanced ]
+      - topologies: [ replicaset, sharded, load-balanced ]
     operations:
       - name: startTransaction
         object: *session