class Bundler::CompactIndexClient::CacheFile

If digests are given, the checksums will be verified.
Write cache files in a way that is robust to concurrent modifications.

def self.copy(path, &block)

Initialize with a copy of the original file, then yield the instance.
# Initialize with a copy of the original file, then yield the instance.
# The temp file starts as a byte-for-byte copy of +path+ so that digests
# computed while copying reflect the existing contents.
def self.copy(path, &block)
  new(path) do |cache_file|
    cache_file.initialize_digests
    SharedHelpers.filesystem_access(path, :read) do
      path.open("rb") do |source|
        cache_file.open {|destination| IO.copy_stream(source, destination) }
      end
    end
    yield cache_file
  end
end

def self.write(path, data, digests = nil)

Write data to a temp file, then replace the original file with it, verifying the digests if given.
# Write +data+ to a temp file, then atomically promote it over the original
# file, verifying +digests+ when they are provided. Does nothing when +data+
# is nil or false.
def self.write(path, data, digests = nil)
  return unless data
  new(path) do |cache_file|
    cache_file.digests = digests
    cache_file.write(data)
  end
end

def append(data)

Returns false without appending when there are no digests, since appending is too error-prone to do without digests.
# Append +data+ to the temp file, then verify and commit.
# Returns false without appending when no digests are tracked, because
# appending without checksums is too error prone.
def append(data)
  return false unless digests?
  open("a") {|io| io.write data }
  verify && commit
end

def close

The file is permanently closed.
Remove the temp file without replacing the original file.
# Permanently close the cache file: remove the temp file without replacing
# the original file. Safe to call multiple times.
def close
  return if @closed
  # Only the temp file is discarded; the original is left untouched.
  FileUtils.remove_file(path) if @path && @path.file?
  @closed = true
end

def commit

The file is permanently closed.
Replace the original file with the temp file without verifying digests.
# Permanently close the cache file by replacing the original file with the
# temp file, without verifying digests. Raises ClosedError if already closed.
def commit
  raise ClosedError, "Cannot commit closed file" if @closed
  SharedHelpers.filesystem_access(original_path, :write) { FileUtils.mv(path, original_path) }
  @closed = true
end

def commit!

# Verify the digests and commit, raising DigestMismatchError when the
# computed digests do not match the expected ones.
def commit!
  raise DigestMismatchError.new(@base64digests, @expected_digests) unless verify
  commit
end

def digests=(expected_digests)

set the digests that will be verified at the end
# Set the digests that will be verified at the end.
# Trims or initializes the tracked digest objects to match the expected set.
def digests=(expected_digests)
  @expected_digests = expected_digests
  if expected_digests.nil?
    # Nothing to verify, so drop any digest state.
    @digests = nil
    return
  end
  wanted = expected_digests.keys
  if @digests
    # Keep only the algorithms we are expected to verify.
    @digests = @digests.slice(*wanted)
  else
    initialize_digests(wanted)
  end
end

def digests?

# Truthy when at least one digest object is being tracked; nil when no
# digest hash exists at all.
def digests?
  @digests ? @digests.any? : nil
end

def initialize(original_path, &block)

# Set up paths and permissions for a new cache file. When a block is given,
# yields self and guarantees the temp file is cleaned up via #close.
def initialize(original_path, &block)
  @original_path = original_path
  # Reuse the original file's permissions when it already exists.
  @perm = if original_path.file?
    original_path.stat.mode
  else
    DEFAULT_FILE_MODE
  end
  # Include the process id so concurrent processes never share a temp file.
  @path = original_path.sub(/$/, ".#{$$}.tmp")
  return unless block_given?
  begin
    yield self
  ensure
    close
  end
end

def initialize_digests(keys = nil)

Initialize the digests using CompactIndexClient::SUPPORTED_DIGESTS, or a subset based on keys.
# Build fresh digest objects from CompactIndexClient::SUPPORTED_DIGESTS,
# optionally restricted to the given +keys+.
def initialize_digests(keys = nil)
  algorithms = keys ? SUPPORTED_DIGESTS.slice(*keys) : SUPPORTED_DIGESTS.dup
  @digests = algorithms.transform_values {|klass| SharedHelpers.digest(klass).new }
end

def md5

Remove this method when we stop generating MD5 digests for legacy ETags.
# The tracked md5 digest object, or nil when digests are absent.
# TODO: remove when md5 digests are no longer generated for legacy etags.
def md5
  return nil unless @digests
  @digests["md5"]
end

def open(write_mode = "wb", perm = @perm, &block)

Open the temp file for writing, reusing original permissions, yielding the IO object.
# Open the temp file for writing, reusing original permissions, and yield
# the IO object. When digests are tracked the IO is wrapped in a DigestIO
# so every write updates the checksums. Raises ClosedError once closed.
def open(write_mode = "wb", perm = @perm, &block)
  raise ClosedError, "Cannot reopen closed file" if @closed
  SharedHelpers.filesystem_access(path, :write) do
    path.open(write_mode, perm) do |io|
      target = digests? ? Gem::Package::DigestIO.new(io, @digests) : io
      yield target
    end
  end
end

def reset_digests

reset the digests so they don't contain any previously read data
# Reset every tracked digest so they don't contain any previously read data.
def reset_digests
  return unless @digests
  @digests.each_value {|digest| digest.reset }
end

def size

# Size of the temp file in bytes.
def size
  path.size
end

def verify

Verify the digests, returning true on match, false on mismatch.
# Verify the digests, returning true on match, false on mismatch.
# Finalizes the digest objects into base64 strings and discards them, so
# verification can happen at most once per set of digests. Returns true
# when there is nothing to verify.
def verify
  return true unless @expected_digests && digests?
  finalized = @digests.transform_values!(&:base64digest)
  @digests = nil
  @base64digests = finalized
  @base64digests.all? {|name, value| value == @expected_digests[name] }
end

def write(data)

def write(data)
  reset_digests
  open {|f| f.write data }
  commit!
end