class RuboCop::ResultCache

# Remove old files so that the cache doesn't grow too big. When the
# threshold MaxFilesInCache has been exceeded, the oldest 50% of all the
# files in the cache are removed. The reason for removing so much is that
# cleaning should be done relatively seldom, since there is a slight risk
# that some other RuboCop process was just about to read the file, when
# there's parallel execution and the cache is shared.
# Prunes the result cache: once the file count exceeds the
# AllCops/MaxFilesInCache threshold, deletes the oldest half (plus one)
# of the cached files and removes any directories left empty.
def self.cleanup(config_store, verbose, cache_root = nil)
  return if inhibit_cleanup # OPTIMIZE: For faster testing
  cache_root ||= cache_root(config_store)
  return unless File.exist?(cache_root)

  files, dirs = Find.find(cache_root).partition { |path| File.file?(path) }
  limit = config_store.for('.')['AllCops']['MaxFilesInCache']
  return unless files.length > limit && files.length > 1

  # Add 1 to half the number of files, so that we remove the file if
  # there's only 1 left.
  remove_count = 1 + files.length / 2
  puts "Removing the #{remove_count} oldest files from #{cache_root}" if verbose
  oldest_first = files.sort_by { |path| File.mtime(path) }
  begin
    oldest_first.first(remove_count).each { |path| File.delete(path) }
    dirs.each { |dir| Dir.rmdir(dir) if Dir["#{dir}/*"].empty? }
  rescue Errno::ENOENT
    # This can happen if parallel RuboCop invocations try to remove the
    # same files. No problem.
    puts $ERROR_INFO if verbose
  end
end