File: bulkWrite-serverErrors.yml

description: "retryable-writes bulkWrite serverErrors"

schemaVersion: "1.0"

runOnRequirements:
  - minServerVersion: "3.6"
    topologies: [ replicaset ]
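    # Standalone servers do not support retryable writes, so MongoDB 3.6+ on a replica set is required.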

createEntities:
  - client:
      id: &client0 client0
      useMultipleMongoses: false
      observeEvents: [ commandStartedEvent ]
  - database:
      id: &database0 database0
      client: *client0
      databaseName: &databaseName retryable-writes-tests
  - collection:
      id: &collection0 collection0
      database: *database0
      collectionName: &collectionName coll

initialData:
  - collectionName: *collectionName
    databaseName: *databaseName
    documents:
      - { _id: 1, x: 11 }
      - { _id: 2, x: 22 }

tests:
  - description: "BulkWrite succeeds after retryable writeConcernError in first batch"
    runOnRequirements:
      - minServerVersion: "4.0"
        topologies: [ replicaset ]
      - minServerVersion: "4.1.7"
        topologies: [ sharded-replicaset ]
    operations:
      - name: failPoint
        object: testRunner
        arguments:
          client: *client0
          failPoint:
            configureFailPoint: failCommand
            mode: { times: 1 }
            data:
              failCommands: [ insert ]
              errorLabels: [RetryableWriteError] # top-level error labels
              writeConcernError:
                code: 91 # ShutdownInProgress
                errmsg: "Replication is being shut down"
      - name: bulkWrite
        object: *collection0
        arguments:
          requests:
            - insertOne:
                document: { _id: 3, x: 33 }
            - deleteOne:
                filter: { _id: 2 }
        expectResult:
          deletedCount: 1
          insertedCount: 1
          matchedCount: 0
          modifiedCount: 0
          upsertedCount: 0
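          # $$unsetOrMatches: drivers that do not report inserted IDs may omit this field.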
          insertedIds: { $$unsetOrMatches: { 0: 3 } }
          upsertedIds: { }
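    # Two insert commandStartedEvents are expected: the initial attempt and the
    # retry after the writeConcernError; the delete is attempted only once.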
    expectEvents:
      - client: *client0
        events:
          - commandStartedEvent:
              command:
                insert: *collectionName
                documents: [{ _id: 3, x: 33 }]
              commandName: insert
              databaseName: *databaseName
          - commandStartedEvent:
              command:
                insert: *collectionName
                documents: [{ _id: 3, x: 33 }]
              commandName: insert
              databaseName: *databaseName
          - commandStartedEvent:
              command:
                delete: *collectionName
                deletes:
                  -
                    q: { _id: 2 }
                    limit: 1
              commandName: delete
              databaseName: *databaseName
    outcome:
      - collectionName: *collectionName
        databaseName: *databaseName
        documents:
          - { _id: 1, x: 11 }
          - { _id: 3, x: 33 }  # The write was still applied
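
For reference, a minimal Go sketch of how this scenario maps onto the driver API: it installs the same failCommand fail point via RunCommand against the admin database, then issues the equivalent BulkWrite. The connection URI, context handling, and host setup are illustrative assumptions, and the fail point only works against a mongod started with test commands enabled.

package main

import (
    "context"
    "fmt"
    "log"

    "go.mongodb.org/mongo-driver/bson"
    "go.mongodb.org/mongo-driver/mongo"
    "go.mongodb.org/mongo-driver/mongo/options"
)

func main() {
    ctx := context.Background()

    // Retryable writes default to on in recent 1.x drivers; SetRetryWrites(true) makes it explicit.
    // The URI is illustrative; the spec's useMultipleMongoses=false corresponds to a single host here.
    client, err := mongo.Connect(ctx, options.Client().
        ApplyURI("mongodb://localhost:27017/?replicaSet=rs0").
        SetRetryWrites(true))
    if err != nil {
        log.Fatal(err)
    }
    defer client.Disconnect(ctx)

    // Same fail point as the spec: the next insert command returns a
    // retryable writeConcernError (code 91, ShutdownInProgress).
    // Requires mongod started with enableTestCommands=1.
    failPoint := bson.D{
        {"configureFailPoint", "failCommand"},
        {"mode", bson.D{{"times", 1}}},
        {"data", bson.D{
            {"failCommands", bson.A{"insert"}},
            {"errorLabels", bson.A{"RetryableWriteError"}},
            {"writeConcernError", bson.D{
                {"code", 91},
                {"errmsg", "Replication is being shut down"},
            }},
        }},
    }
    if err := client.Database("admin").RunCommand(ctx, failPoint).Err(); err != nil {
        log.Fatal(err)
    }

    // Equivalent of the spec's bulkWrite operation against retryable-writes-tests.coll.
    coll := client.Database("retryable-writes-tests").Collection("coll")
    models := []mongo.WriteModel{
        mongo.NewInsertOneModel().SetDocument(bson.D{{"_id", 3}, {"x", 33}}),
        mongo.NewDeleteOneModel().SetFilter(bson.D{{"_id", 2}}),
    }
    res, err := coll.BulkWrite(ctx, models)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("inserted=%d deleted=%d\n", res.InsertedCount, res.DeletedCount)
}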