File: gridfs-delete.yml

description: "timeoutMS behaves correctly for GridFS delete operations"

schemaVersion: "1.9"

runOnRequirements:
  - minServerVersion: "4.4"
    serverless: forbid  # GridFS ops can be slow on serverless.

createEntities:
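  # This client is only used to run the failPoint operations. It sets no timeoutMS, so (assuming the usual CSOT test
  # layout) the configureFailPoint commands are not affected by the short timeout under test.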
  - client:
      id: &failPointClient failPointClient
      useMultipleMongoses: false
  - client:
      id: &client client
      uriOptions:
        timeoutMS: 75
      useMultipleMongoses: false
      observeEvents:
        - commandStartedEvent
  - database:
      id: &database database
      client: *client
      databaseName: &databaseName test
  - bucket:
      id: &bucket bucket
      database: *database
  - collection:
      id: &filesCollection filesCollection
      database: *database
      collectionName: &filesCollectionName fs.files
  - collection:
      id: &chunksCollection chunksCollection
      database: *database
      collectionName: &chunksCollectionName fs.chunks

initialData:
  - collectionName: *filesCollectionName
    databaseName: *databaseName
    documents:
      - _id: &fileDocumentId { $oid: "000000000000000000000005" }
        length: 8
        chunkSize: 4
        uploadDate: { $date: "1970-01-01T00:00:00.000Z" }
        filename: "length-8"
        contentType: "application/octet-stream"
        aliases: []
        metadata: {}
  - collectionName: *chunksCollectionName
    databaseName: *databaseName
    documents:
      - _id: { $oid: "000000000000000000000005" }
        files_id: *fileDocumentId
        n: 0
        data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344
      - _id: { $oid: "000000000000000000000006" }
        files_id: *fileDocumentId
        n: 1
        data: { $binary: { base64: "ESIzRA==", subType: "00" } } # hex: 11223344
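  # The stored file is 8 bytes split into two 4-byte chunks, so deleting it removes one document from fs.files and
  # two documents from fs.chunks.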

tests:
  - description: "timeoutMS can be overridden for delete"
    operations:
      - name: failPoint
        object: testRunner
        arguments:
          client: *failPointClient
          failPoint:
            configureFailPoint: failCommand
            mode: { times: 1 }
            data:
              failCommands: ["delete"]
              blockConnection: true
              blockTimeMS: 100
      - name: delete
        object: *bucket
        arguments:
          id: *fileDocumentId
          timeoutMS: 1000 # The client timeoutMS is 75ms and the operation blocks for 100ms, so 1000ms should let it succeed.

  - description: "timeoutMS applied to delete against the files collection"
    operations:
      - name: failPoint
        object: testRunner
        arguments:
          client: *failPointClient
          failPoint:
            configureFailPoint: failCommand
            mode: { times: 1 }
            data:
              failCommands: ["delete"]
              blockConnection: true
              blockTimeMS: 100
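      # The delete inherits timeoutMS=75 from the client and the first "delete" command (against the files
      # collection) is blocked for 100ms, so the operation should time out.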
      - name: delete
        object: *bucket
        arguments:
          id: *fileDocumentId
        expectError:
          isTimeoutError: true
    expectEvents:
      - client: *client
        events:
          - commandStartedEvent:
              commandName: delete
              databaseName: *databaseName
              command:
                delete: *filesCollectionName
                maxTimeMS: { $$type: ["int", "long"] }

  - description: "timeoutMS applied to delete against the chunks collection"
    operations:
      - name: failPoint
        object: testRunner
        arguments:
          client: *failPointClient
          failPoint:
            configureFailPoint: failCommand
            mode:
              # The first "delete" will be against the files collection, so we skip it.
              skip: 1
            data:
              failCommands: ["delete"]
              blockConnection: true
              blockTimeMS: 100
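      # The second "delete" command (against the chunks collection) is blocked for 100ms, which exceeds the
      # client's timeoutMS of 75ms, so the operation should time out.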
      - name: delete
        object: *bucket
        arguments:
          id: *fileDocumentId
        expectError:
          isTimeoutError: true

  # Test that drivers do not refresh the timeout between commands. We test this by blocking both "delete" commands
  # for 50ms each. The delete inherits timeoutMS=75 from the client, and the two blocked commands together take more
  # than 75ms, so the operation should fail.
  - description: "timeoutMS applied to entire delete, not individual parts"
    operations:
      - name: failPoint
        object: testRunner
        arguments:
          client: *failPointClient
          failPoint:
            configureFailPoint: failCommand
            mode: { times: 2 }
            data:
              failCommands: ["delete"]
              blockConnection: true
              blockTimeMS: 50
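      # Each "delete" command is blocked for 50ms, so the two commands take about 100ms in total, which exceeds
      # timeoutMS=75 even though neither command exceeds it on its own.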
      - name: delete
        object: *bucket
        arguments:
          id: *fileDocumentId
        expectError:
          isTimeoutError: true