# frozen_string_literal: true
module GraphQL
module Execution
class Interpreter
class Runtime
# Shared behavior for result containers (hash- and array-shaped).
# Tracks the parent result, this result's key in the parent, and whether
# a `nil` here must propagate upward (non-null position).
module GraphQLResult
  def initialize(result_name, parent_result, is_non_null_in_parent)
    @graphql_parent = parent_result
    # Death propagates downward: anything created under a dead result is dead.
    # Only assign the ivar when true, to avoid allocating it on the hot path.
    @graphql_dead = true if parent_result && parent_result.graphql_dead
    @graphql_result_name = result_name
    @graphql_is_non_null_in_parent = is_non_null_in_parent
    # Created lazily, only if some entry requires Result-wrapper tracking.
    @graphql_metadata = nil
  end

  attr_accessor :graphql_dead
  attr_reader :graphql_parent, :graphql_result_name, :graphql_is_non_null_in_parent

  # @return [Hash, Array] Plain-Ruby result data (`@graphql_metadata` contains Result wrapper objects)
  attr_accessor :graphql_result_data

  # @return [Array] The path from the root result down to this one (memoized)
  def path
    @path ||= build_path([])
  end

  # Walk up the parent chain, prepending each result's name along the way.
  # The root result has no name, so it contributes nothing.
  def build_path(path_array)
    path_array.unshift(graphql_result_name) if graphql_result_name
    parent = @graphql_parent
    parent ? parent.build_path(path_array) : path_array
  end
end
# Hash-shaped result container: one entry per selected field.
class GraphQLResultHash
  include GraphQLResult

  def initialize(_result_name, _parent_result, _is_non_null_in_parent)
    super
    @graphql_result_data = {}
  end

  # When this hash has been merged into another result (see #merge_into),
  # every subsequent write must be mirrored into that target as well.
  attr_accessor :graphql_merged_into

  def set_leaf(key, value)
    # This is a hack.
    # This object may already have been merged into the root-level result,
    # but some lazies hold closures that still reference _this_ object, and
    # resolving them writes here. To return a proper top-level result, the
    # merge target must be updated; to return a proper partial result (eg,
    # for a directive), this object must be updated too. Yowza.
    target = @graphql_merged_into
    target.set_leaf(key, value) if target
    @graphql_result_data[key] = value
    # Mirror into the metadata storage, but only if it was ever initialized.
    @graphql_metadata[key] = value if @graphql_metadata
    value
  end

  def set_child_result(key, value)
    target = @graphql_merged_into
    target.set_child_result(key, value) if target
    @graphql_result_data[key] = value.graphql_result_data
    # Storing a wrapper object forces metadata tracking: create the metadata
    # hash on first use, then keep it in sync with every later write.
    @graphql_metadata ||= @graphql_result_data.dup
    @graphql_metadata[key] = value
    value
  end

  def delete(key)
    @graphql_metadata.delete(key) if @graphql_metadata
    @graphql_result_data.delete(key)
  end

  def each
    storage = @graphql_metadata || @graphql_result_data
    storage.each { |key, value| yield(key, value) }
  end

  def values
    (@graphql_metadata || @graphql_result_data).values
  end

  def key?(key)
    @graphql_result_data.key?(key)
  end

  def [](key)
    (@graphql_metadata || @graphql_result_data)[key]
  end

  # Copy every entry of this result into `into_result`, then remember the
  # target so late writes (from lazies) are forwarded there too.
  def merge_into(into_result)
    each do |key, value|
      if value.is_a?(GraphQLResultHash)
        existing = into_result[key]
        if existing
          value.merge_into(existing)
        else
          into_result.set_child_result(key, value)
        end
      elsif value.is_a?(GraphQLResultArray)
        # No special handling of arrays: currently there's no way to split
        # the execution of a list over several concurrent flows.
        into_result.set_child_result(key, value)
      else
        # Since this passed the `fields_will_merge` validation, assume the
        # old and new values are the same.
        into_result.set_leaf(key, value)
      end
    end
    @graphql_merged_into = into_result
  end
end
# Array-shaped result container: one entry per list item.
class GraphQLResultArray
  include GraphQLResult

  def initialize(_result_name, _parent_result, _is_non_null_in_parent)
    super
    @graphql_result_data = []
  end

  # Drop the entry at `index` because it was skipped (eg, by a directive).
  # Tricky: some indices may already be storing `Lazy`s, and the runtime
  # keeps using pre-skip indices, so this object has to shift incoming
  # writes to account for every already-skipped index.
  def graphql_skip_at(index)
    (@skip_indices ||= []) << index
    storage_index = index - skipped_offset(index)
    @graphql_metadata.delete_at(storage_index) if @graphql_metadata
    @graphql_result_data.delete_at(storage_index)
  end

  def set_leaf(idx, value)
    idx -= skipped_offset(idx)
    @graphql_result_data[idx] = value
    # Mirror into the metadata storage, but only if it was ever initialized.
    @graphql_metadata[idx] = value if @graphql_metadata
    value
  end

  def set_child_result(idx, value)
    idx -= skipped_offset(idx)
    @graphql_result_data[idx] = value.graphql_result_data
    # Storing a wrapper object forces metadata tracking: create the metadata
    # array on first use, then keep it in sync with every later write.
    @graphql_metadata ||= @graphql_result_data.dup
    @graphql_metadata[idx] = value
    value
  end

  def values
    (@graphql_metadata || @graphql_result_data)
  end

  private

  # How many skipped indices fall strictly before `idx`
  # (zero when nothing has been skipped yet).
  def skipped_offset(idx)
    @skip_indices ? @skip_indices.count { |skipped_idx| skipped_idx < idx } : 0
  end
end
end
end
end
end