class Concurrent::Collection::AtomicReferenceMapBackend

def rebuild(table)

Moves and/or copies the nodes in each bin to the new table. See the class-level comments above for an explanation of the resizing scheme.
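
During a resize, a bin of the old table that has been transferred (or found empty) is replaced by a node whose hash is MOVED and whose key is the new table, so concurrent readers and writers that land on that bin carry on against the new table; reverse forwarders play the same role in the opposite direction for bins whose transfer had to be deferred. The snippet below is a minimal sketch of that forwarding idea in plain Ruby; Forwarder and lookup are illustrative stand-ins, not the backend's actual Node or Table classes.

# Minimal sketch of the forwarding idea (plain Arrays and a Struct stand in
# for the backend's real Table and Node classes).
Forwarder = Struct.new(:next_table) # plays the role of a MOVED node

# A reader that lands on a forwarder simply retries on the table it points to.
def lookup(table, index)
  bin = table[index]
  bin.is_a?(Forwarder) ? lookup(bin.next_table, index) : bin
end

old_table    = Array.new(2)             # table being resized
new_table    = Array.new(4)             # its replacement, twice the size
new_table[1] = :entry                   # bin 1 has already been transferred
old_table[1] = Forwarder.new(new_table) # forwarder left behind in the old bin

p lookup(old_table, 1) # => :entry, found by following the forwarder
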
def rebuild(table)
  old_table_size = table.size
  new_table      = table.next_in_size_table
  # puts "#{old_table_size} -> #{new_table.size}"
  forwarder      = Node.new(MOVED, new_table, NULL)
  rev_forwarder  = nil
  locked_indexes = nil # holds bins to revisit; nil until needed
  locked_arr_idx = 0
  bin            = old_table_size - 1 # walk the old table from its last bin down to bin 0
  i              = bin
  while true
    if !(node = table.volatile_get(i))
      # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table
      redo unless (bin >= 0 ? table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder))
    elsif Node.locked_hash?(node_hash = node.hash) # bin is locked by another operation
      locked_indexes ||= ::Array.new # lazily create the buffer of bins to revisit
      if bin < 0 && locked_arr_idx > 0 # revisit pass: other buffered bins remain, so avoid blocking
        locked_arr_idx -= 1
        i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin
        redo # retry on the swapped-in bin
      end
      if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE
        node.try_await_lock(table, i) # no other options -- block
        redo
      end
      rev_forwarder ||= Node.new(MOVED, table, NULL) # forwards readers of the new table back to the old one
      redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list
      locked_indexes << i # defer this bin to the revisit pass
      # the old bin will split into bins i and i + old_table_size of the new table
      new_table.volatile_set(i, rev_forwarder)
      new_table.volatile_set(i + old_table_size, rev_forwarder)
    else
      redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder) # retry if the bin could not be locked and transferred
    end
    if bin > 0 # main pass: move on to the next lower bin
      i = (bin -= 1)
    elsif locked_indexes && !locked_indexes.empty? # main pass done: revisit bins that were locked
      bin = -1
      i = locked_indexes.pop
      locked_arr_idx = locked_indexes.size - 1
    else
      return new_table # every bin has been transferred
    end
  end
end
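
For context on the two volatile_set calls above: with power-of-two table sizes (as in this ported ConcurrentHashMapV8 design), an entry that hashed to index i in an old table of n bins can only land at index i or i + n in the doubled table, which is the arithmetic behind split_old_bin distributing each old bin across exactly those two new bins, and behind the pair of reverse-forwarder slots installed for deferred bins. The sketch below reproduces only that index arithmetic; split_indices is a hypothetical helper added for illustration and is not part of the backend.

# Hypothetical helper showing where the entries of one old bin end up
# after the table doubles from old_size to 2 * old_size bins.
def split_indices(hashes, old_size)
  hashes.group_by do |h|
    low = h & (old_size - 1)                    # bin in the old table
    (h & old_size).zero? ? low : low + old_size # "low" or "high" bin in the new table
  end
end

old_size = 8
hashes   = [3, 11, 19, 27] # all collide in bin 3 of the old table
p split_indices(hashes, old_size)
# => {3=>[3, 19], 11=>[11, 27]} -- old bin 3 splits into new bins 3 and 11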