module GraphQL::Execution::Interpreter::Resolve

# Walk the given result structures, force any `Lazy` values found along
# the way, and schedule follow-up passes on the dataloader until
# everything is resolved.
#
# @param results [Array] result hashes/arrays and/or Lazy values to walk
# @param dataloader [#run, #append_job] runs pending jobs and enqueues follow-up passes
# @return [void]
def self.resolve(results, dataloader)
  # Pending dataloader jobs may still write lazies into the result
  # hashes; run them to completion first so this traversal sees every
  # lazy that will be present. (A better implementation would somehow
  # interleave — or unify — the two approaches.)
  dataloader.run
  deferred = []
  until results.empty?
    item = results.shift
    case item
    when Runtime::GraphQLResultHash, Hash
      results.concat(item.values)
    when Runtime::GraphQLResultArray
      results.concat(item.values)
    when Array
      results.concat(item)
    when Lazy
      forced = item.value
      case forced
      when Lazy
        # Forcing this lazy produced another lazy —
        # keep it on the same queue.
        results << forced
      when Runtime::GraphQLResultHash, Runtime::GraphQLResultArray, Hash, Array
        # Defer these containers wholesale for the next pass —
        # later dataloader work may still mutate them.
        deferred << forced
      end
    end
  end
  if deferred.any?
    # Pending dataloader jobs may populate the result arrays or result
    # hashes accumulated in `deferred`. Run those **to completion**
    # before scheduling the next resolution pass. (A bare `append_job`
    # isn't enough when pending jobs require multiple passes.)
    dataloader.run
    dataloader.append_job { resolve(deferred, dataloader) }
  end
  nil
end

# Enqueue a dataloader job that resolves `results`.
#
# @param results [Array] result hashes/arrays and/or Lazy values to walk
# @param dataloader [#append_job]
# @return [void]
def self.resolve_all(results, dataloader)
  # Resolution happens lazily, inside a dataloader job, rather than
  # immediately here.
  dataloader.append_job do
    resolve(results, dataloader)
  end
  nil
end

# Force the lazies registered at the shallowest depth, then recurse,
# continuing while each shallowest depth has lazies to force.
#
# @param lazies_at_depth [Hash] maps depth (sortable, e.g. Integer) to an Array of lazies
# @param dataloader [#run, #append_job]
# @return [void]
def self.resolve_each_depth(lazies_at_depth, dataloader)
  # Work shallow-to-deep: pick the smallest depth still present.
  shallowest_depth = lazies_at_depth.keys.min
  return nil if shallowest_depth.nil?
  # `Hash#delete` hands back the batch registered at that depth.
  batch = lazies_at_depth.delete(shallowest_depth)
  return nil unless batch.any?
  # Force this batch from inside a dataloader job, then run lazies
  # _and_ the dataloader together to see if more work is enqueued
  # before recursing into deeper levels.
  dataloader.append_job do
    batch.each(&:value)
  end
  dataloader.run
  resolve_each_depth(lazies_at_depth, dataloader)
  nil
end