File: bulk_write.rb (ruby-mongo 2.23.0-1)

# frozen_string_literal: true
# rubocop:todo all

# Copyright (C) 2014-2020 MongoDB Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

require 'mongo/bulk_write/result'
require 'mongo/bulk_write/transformable'
require 'mongo/bulk_write/validatable'
require 'mongo/bulk_write/combineable'
require 'mongo/bulk_write/ordered_combiner'
require 'mongo/bulk_write/unordered_combiner'
require 'mongo/bulk_write/result_combiner'

module Mongo
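  # Encapsulates a bulk write: an ordered or unordered list of insert,
  # update, replace and delete requests executed against a single
  # collection, with the requests grouped into batched operations.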
  class BulkWrite
    extend Forwardable
    include Operation::ResponseHandling

    # @return [ Mongo::Collection ] collection The collection.
    attr_reader :collection

    # @return [ Array<Hash, BSON::Document> ] requests The requests.
    attr_reader :requests

    # @return [ Hash, BSON::Document ] options The options.
    attr_reader :options

    # Delegate various methods to the collection.
    def_delegators :@collection,
                   :database,
                   :cluster,
                   :write_with_retry,
                   :nro_write_with_retry,
                   :next_primary

    def_delegators :database, :client

    # Execute the bulk write operation.
    #
    # @example Execute the bulk write.
    #   bulk_write.execute
    #
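    # @example Execute and inspect the combined result (an illustrative
    #   sketch; the +collection+ variable and the documents are assumed,
    #   not taken from this file).
    #   bulk = Mongo::BulkWrite.new(
    #     collection,
    #     [
    #       { insert_one: { name: 'apple' } },
    #       { delete_one: { filter: { name: 'pear' } } }
    #     ]
    #   )
    #   result = bulk.execute
    #   result.inserted_count # => 1
    #   result.deleted_count  # => 1 when a matching document existed
    #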
    # @return [ Mongo::BulkWrite::Result ] The result.
    #
    # @since 2.1.0
    def execute
      operation_id = Monitoring.next_operation_id
      result_combiner = ResultCombiner.new
      operations = op_combiner.combine
      validate_requests!
      deadline = calculate_deadline

      client.with_session(@options) do |session|
        operations.each do |operation|
          context = Operation::Context.new(
            client: client,
            session: session,
            operation_timeouts: { operation_timeout_ms: op_timeout_ms(deadline) }
          )
          if single_statement?(operation)
            write_concern = write_concern(session)
            write_with_retry(write_concern, context: context) do |connection, txn_num, context|
              execute_operation(
                operation.keys.first,
                operation.values.flatten,
                connection,
                context,
                operation_id,
                result_combiner,
                session,
                txn_num)
            end
          else
            nro_write_with_retry(write_concern, context: context) do |connection, txn_num, context|
              execute_operation(
                operation.keys.first,
                operation.values.flatten,
                connection,
                context,
                operation_id,
                result_combiner,
                session)
            end
          end
        end
      end
      result_combiner.result
    end

    # Create the new bulk write operation.
    #
    # @api private
    #
    # @example Create an ordered bulk write.
    #   Mongo::BulkWrite.new(collection, [{ insert_one: { _id: 1 }}])
    #
    # @example Create an unordered bulk write.
    #   Mongo::BulkWrite.new(collection, [{ insert_one: { _id: 1 }}], ordered: false)
    #
    # @example Create an ordered mixed bulk write.
    #   Mongo::BulkWrite.new(
    #     collection,
    #     [
    #       { insert_one: { _id: 1 }},
    #       { update_one: { filter: { _id: 0 }, update: { '$set' => { name: 'test' }}}},
    #       { delete_one: { filter: { _id: 2 }}}
    #     ]
    #   )
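    #
    # @example Create an unordered bulk write with a write concern and a
    #   client-side timeout (illustrative values; the options shown are
    #   among those read by this class).
    #   Mongo::BulkWrite.new(
    #     collection,
    #     [{ insert_one: { _id: 1 }}],
    #     ordered: false,
    #     write_concern: { w: :majority },
    #     timeout_ms: 5_000
    #   )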
    #
    # @param [ Mongo::Collection ] collection The collection.
    # @param [ Enumerable<Hash, BSON::Document> ] requests The requests;
    #   the list cannot be empty.
    # @param [ Hash, BSON::Document ] options The options.
    #
    # @since 2.1.0
    def initialize(collection, requests, options = {})
      @collection = collection
      @requests = requests
      @options = options || {}
      if @options[:timeout_ms] && @options[:timeout_ms] < 0
        raise ArgumentError, "timeout_ms options must be non-negative integer"
      end
    end

    # Is the bulk write ordered?
    #
    # @api private
    #
    # @example Is the bulk write ordered?
    #   bulk_write.ordered?
    #
    # @return [ true, false ] If the bulk write is ordered.
    #
    # @since 2.1.0
    def ordered?
      @ordered ||= options.fetch(:ordered, true)
    end

    # Get the write concern for the bulk write.
    #
    # @api private
    #
    # @example Get the write concern.
    #   bulk_write.write_concern
    #
    # @return [ WriteConcern ] The write concern.
    #
    # @since 2.1.0
    def write_concern(session = nil)
      @write_concern ||= options[:write_concern] ?
        WriteConcern.get(options[:write_concern]) :
        collection.write_concern_with_session(session)
    end

    private

    SINGLE_STATEMENT_OPS = [ :delete_one,
                             :update_one,
                             :insert_one ].freeze

    # @return [ Float | nil ] Deadline for the batch of operations, if set.
    def calculate_deadline
      timeout_ms = @options[:timeout_ms] || collection.timeout_ms
      return nil if timeout_ms.nil?

      if timeout_ms == 0
        0
      else
        Utils.monotonic_time + (timeout_ms / 1_000.0)
      end
    end

    # @param [ Float | nil ] deadline Deadline for the batch of operations.
    #
    # @return [ Integer | nil ] Timeout in milliseconds for the next operation.
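    #
    # A worked sketch with assumed values: given +timeout_ms: 5_000+,
    # #calculate_deadline returns +Utils.monotonic_time + 5.0+; if 1.2
    # seconds have elapsed when the next operation is built, this method
    # returns roughly 3_800, so later operations only get the time that
    # remains for the whole bulk write. A deadline of 0 (from
    # +timeout_ms: 0+) is passed through unchanged.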
    def op_timeout_ms(deadline)
      return nil if deadline.nil?

      if deadline == 0
        0
      else
        ((deadline - Utils.monotonic_time) * 1_000).to_i
      end
    end

    def single_statement?(operation)
      SINGLE_STATEMENT_OPS.include?(operation.keys.first)
    end

    def base_spec(operation_id, session)
      {
        :db_name => database.name,
        :coll_name => collection.name,
        :write_concern => write_concern(session),
        :ordered => ordered?,
        :operation_id => operation_id,
        :bypass_document_validation => !!options[:bypass_document_validation],
        :max_time_ms => options[:max_time_ms],
        :options => options,
        :id_generator => client.options[:id_generator],
        :session => session,
        :comment => options[:comment],
        :let => options[:let],
      }
    end

    def execute_operation(name, values, connection, context, operation_id, result_combiner, session, txn_num = nil)
      validate_collation!(connection)
      validate_array_filters!(connection)
      validate_hint!(connection)

      unpin_maybe(session, connection) do
        if values.size > connection.description.max_write_batch_size
          split_execute(name, values, connection, context, operation_id, result_combiner, session, txn_num)
        else
          result = send(name, values, connection, context, operation_id, session, txn_num)

          add_server_diagnostics(connection) do
            add_error_labels(connection, context) do
              result_combiner.combine!(result, values.size)
            end
          end
        end
      end
    # With OP_MSG (3.6+ servers), the size of each section in the message
    # is independently capped at 16m and each bulk operation becomes
    # its own section. The size of the entire bulk write is limited to 48m.
    # With OP_QUERY (pre-3.6 servers), the entire bulk write is sent as a
    # single document and is thus subject to the 16m document size limit.
    # This means the splits differ between pre-3.6 and 3.6+ servers, with
    # 3.6+ servers needing to split less often.
    rescue Error::MaxBSONSize, Error::MaxMessageSize => e
      raise e if values.size <= 1
      unpin_maybe(session, connection) do
        split_execute(name, values, connection, context, operation_id, result_combiner, session, txn_num)
      end
    end

    def op_combiner
      @op_combiner ||= ordered? ? OrderedCombiner.new(requests) : UnorderedCombiner.new(requests)
    end

    def split_execute(name, values, connection, context, operation_id, result_combiner, session, txn_num)
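      # Recursively halve the batch: execute the first half, then the second.
      # A half that still exceeds the server's max_write_batch_size or the
      # BSON/message size limits is split again by #execute_operation, until
      # each slice fits or contains a single document. Outside a transaction,
      # the second half gets a fresh transaction number when retryable writes
      # supplied one for the first half.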
      execute_operation(name, values.shift(values.size / 2), connection, context, operation_id, result_combiner, session, txn_num)

      txn_num = session.next_txn_num if txn_num && !session.in_transaction?
      execute_operation(name, values, connection, context, operation_id, result_combiner, session, txn_num)
    end

    def delete_one(documents, connection, context, operation_id, session, txn_num)
      QueryCache.clear_namespace(collection.namespace)

      spec = base_spec(operation_id, session).merge(:deletes => documents, :txn_num => txn_num)
      Operation::Delete.new(spec).bulk_execute(connection, context: context)
    end

    def delete_many(documents, connection, context, operation_id, session, txn_num)
      QueryCache.clear_namespace(collection.namespace)

      spec = base_spec(operation_id, session).merge(:deletes => documents)
      Operation::Delete.new(spec).bulk_execute(connection, context: context)
    end

    def insert_one(documents, connection, context, operation_id, session, txn_num)
      QueryCache.clear_namespace(collection.namespace)

      spec = base_spec(operation_id, session).merge(:documents => documents, :txn_num => txn_num)
      Operation::Insert.new(spec).bulk_execute(connection, context: context)
    end

    def update_one(documents, connection, context, operation_id, session, txn_num)
      QueryCache.clear_namespace(collection.namespace)

      spec = base_spec(operation_id, session).merge(:updates => documents, :txn_num => txn_num)
      Operation::Update.new(spec).bulk_execute(connection, context: context)
    end
    alias :replace_one :update_one

    def update_many(documents, connection, context, operation_id, session, txn_num)
      QueryCache.clear_namespace(collection.namespace)

      spec = base_spec(operation_id, session).merge(:updates => documents)
      Operation::Update.new(spec).bulk_execute(connection, context: context)
    end

    private

    def validate_collation!(connection)
      if op_combiner.has_collation? && !connection.features.collation_enabled?
        raise Error::UnsupportedCollation.new
      end
    end

    def validate_array_filters!(connection)
      if op_combiner.has_array_filters? && !connection.features.array_filters_enabled?
        raise Error::UnsupportedArrayFilters.new
      end
    end

    def validate_hint!(connection)
      if op_combiner.has_hint?
        if !can_hint?(connection) && write_concern && !write_concern.acknowledged?
          raise Error::UnsupportedOption.hint_error(unacknowledged_write: true)
        elsif !connection.features.update_delete_option_validation_enabled?
          raise Error::UnsupportedOption.hint_error
        end
      end
    end

    # Loop through the requests and check whether each operation is allowed
    # to send a hint on the connected server's version.
    #
    # For the following operations the client can send a hint to servers >= 4.2;
    # for all other operations a hint can only be sent to servers >= 4.4:
    #   - updateOne
    #   - updateMany
    #   - replaceOne
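    #
    # For example (server versions and request shapes are assumed for
    # illustration), on a 4.2 server the first request below may carry its
    # hint while the second requires a 4.4+ server:
    #   { update_one: { filter: { a: 1 }, update: { '$set' => { a: 2 } }, hint: { a: 1 } } }
    #   { delete_one: { filter: { a: 1 }, hint: { a: 1 } } }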
    #
    # @param [ Connection ] connection The connection object.
    #
    # @return [ true | false ] Whether every request is allowed to send its
    #   hint on the current server version.
    def can_hint?(connection)
      gte_4_2 = connection.server.description.server_version_gte?('4.2')
      gte_4_4 = connection.server.description.server_version_gte?('4.4')
      op_combiner.requests.all? do |req|
        op = req.keys.first
        if req[op].keys.include?(:hint)
          if [:update_one, :update_many, :replace_one].include?(op)
            gte_4_2
          else
            gte_4_4
          end
        else
          true
        end
      end
    end

    # Perform the request document validation required by the driver
    # specifications. This method validates that the first key of each update
    # document is an operator (i.e. starts with $) and that the first key of
    # each replacement document is not an operator (i.e. does not start with
    # $). A request document may still be invalid without this method flagging
    # it as such (for example, an update or replacement document that mixes
    # operator and non-operator keys), in which case the driver expects the
    # server to fail the operation with an error.
    #
    # Also raises an ArgumentError if the list of requests is empty.
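    #
    # For illustration (field names and filters are assumed, not taken from
    # this file), these requests pass the validation:
    #   { update_one: { filter: { _id: 1 }, update: { '$set' => { name: 'x' } } } }
    #   { replace_one: { filter: { _id: 1 }, replacement: { name: 'x' } } }
    # while these are flagged (raised or warned about, depending on
    # Mongo.validate_update_replace):
    #   { update_one: { filter: { _id: 1 }, update: { name: 'x' } } }
    #   { replace_one: { filter: { _id: 1 }, replacement: { '$set' => { name: 'x' } } } }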
    #
    # @raise [ Error::InvalidUpdateDocument, Error::InvalidReplacementDocument,
    #   ArgumentError ] If a document is invalid or the list of requests is
    #   empty.
    def validate_requests!
      requests_empty = true
      @requests.each do |req|
        requests_empty = false
        if op = req.keys.first
          if [:update_one, :update_many].include?(op)
            if doc = maybe_first(req.dig(op, :update))
              if key = doc.keys&.first
                unless key.to_s.start_with?("$")
                  if Mongo.validate_update_replace
                    raise Error::InvalidUpdateDocument.new(key: key)
                  else
                    Error::InvalidUpdateDocument.warn(Logger.logger, key)
                  end
                end
              end
            end
          elsif op == :replace_one
            if key = req.dig(op, :replacement)&.keys&.first
              if key.to_s.start_with?("$")
                if Mongo.validate_update_replace
                  raise Error::InvalidReplacementDocument.new(key: key)
                else
                  Error::InvalidReplacementDocument.warn(Logger.logger, key)
                end
              end
            end
          end
        end
      end.tap do
        raise ArgumentError, "Bulk write requests cannot be empty" if requests_empty
      end
    end

    # If the given object is an array, return its first element; otherwise
    # return the object itself.
    #
    # @param [ Object ] obj The given object.
    #
    # @return [ Object ] The first element of the array or the given object.
    def maybe_first(obj)
      obj.is_a?(Array) ? obj.first : obj
    end
  end
end