class Opal::Cache::FileCache
def self.dir_writable?(*paths)
def self.dir_writable?(*paths)
  # Expand each path relative to the previous one, then verify that the
  # first path exists and that every existing component is a writable
  # directory. Returns the innermost expanded path, or a falsy value.
  dir = nil
  paths = paths.reduce([]) do |a, b|
    [*a, dir = a.last ? File.expand_path(b, a.last) : b]
  end

  File.exist?(paths.first) &&
    paths.reverse.all? do |i|
      !File.exist?(i) || (File.directory?(i) && File.writable?(i))
    end &&
    dir
end
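A quick sketch of the resolution order, using hypothetical paths (not from the source):

  Opal::Cache::FileCache.dir_writable?('/home/user', '.cache', 'opal')
  # Checks '/home/user', '/home/user/.cache', '/home/user/.cache/opal' in turn.
  # => "/home/user/.cache/opal" when every existing component is a writable
  #    directory; => false when '/home/user' itself does not exist.

Note the trailing `&& dir`: callers like find_dir rely on a falsy return value
when the location is not usable, and on the expanded path when it is.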
def self.find_dir
def self.find_dir
  @find_dir ||=
    case
    # Try to write the cache into a directory pointed to by an environment variable, if present
    when dir = ENV['OPAL_CACHE_DIR']
      FileUtils.mkdir_p(dir)
      dir
    # Otherwise, we write to the place where Opal is installed...
    # I don't think it's a good location to store cache, so many things can go wrong.
    # when dir = dir_writable?(Opal.gem_dir, '..', 'tmp', 'cache')
    #   FileUtils.mkdir_p(dir)
    #   FileUtils.chmod(0o700, dir)
    #   dir
    # Otherwise, ~/.cache/opal...
    when dir = dir_writable?(Dir.home, '.cache', 'opal')
      FileUtils.mkdir_p(dir)
      FileUtils.chmod(0o700, dir)
      dir
    # Only /tmp is writable... or isn't it?
    when (dir = dir_writable?('/tmp', "opal-cache-#{ENV['USER']}")) && File.sticky?('/tmp')
      FileUtils.mkdir_p(dir)
      FileUtils.chmod(0o700, dir)
      dir
    # No way... we can't write anywhere...
    else
      warn "Couldn't find a writable path to store Opal cache. " \
           'Try setting the OPAL_CACHE_DIR environment variable.'
      nil
    end
end
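Given the lookup order above, the first branch can be exercised directly. A
sketch with a hypothetical path; note the result is memoized in @find_dir, so
the variable must be set before the first call:

  ENV['OPAL_CACHE_DIR'] = '/var/tmp/opal-cache'
  Opal::Cache::FileCache.find_dir
  # => "/var/tmp/opal-cache" (created via mkdir_p if missing)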
def cache_filename_for(key)
def cache_filename_for(key)
  "#{@dir}/#{key}.rbm.gz"
end
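For illustration, with a hypothetical cache dir (this helper may be private in
the actual class, hence the hedged send):

  cache = Opal::Cache::FileCache.new(dir: '/tmp/opal-cache')
  cache.send(:cache_filename_for, 'abc123')
  # => "/tmp/opal-cache/abc123.rbm.gz"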
def get(key)
def get(key)
  file = cache_filename_for(key)
  if File.exist?(file)
    # Touch the file so the LRU eviction in tidy_up_cache sees it as
    # recently used.
    FileUtils.touch(file)
    out = File.binread(file)
    out = Zlib.gunzip(out)
    Marshal.load(out) # rubocop:disable Security/MarshalLoad
  end
rescue Zlib::GzipFile::Error
  nil
end
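A round trip, assuming a writable hypothetical dir:

  cache = Opal::Cache::FileCache.new(dir: '/tmp/opal-cache')
  cache.set('abc123', foo: 1)
  cache.get('abc123')  # => {:foo=>1}
  cache.get('missing') # => nil (no such file; a corrupt gzip also returns nil)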
def initialize(dir: nil, max_size: nil)
def initialize(dir: nil, max_size: nil)
  @dir = dir || self.class.find_dir
  # Store at most 32MB of cache. In practice the on-disk footprint is
  # larger, since we don't account for inode size, for instance - it's
  # about 50MB. Also, we run this check before anything runs, so usage
  # may grow to 64MB or even more.
  @max_size = max_size || 32 * 1024 * 1024

  tidy_up_cache
end
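Construction, with defaults or explicit (hypothetical) values:

  cache = Opal::Cache::FileCache.new # auto-detected dir, ~32MB cap
  cache = Opal::Cache::FileCache.new(dir: '/tmp/opal-cache',
                                     max_size: 8 * 1024 * 1024)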
def set(key, data)
def set(key, data)
  file = cache_filename_for(key)

  out = Marshal.dump(data)
  # Sometimes `Zlib::BufError` gets raised; it's unclear why - possibly
  # a race condition (see https://github.com/ruby/zlib/issues/49).
  # Limit the number of retries to avoid infinite loops.
  retries = 5
  begin
    out = Zlib.gzip(out, level: 9)
  rescue Zlib::BufError
    warn "\n[Opal]: Zlib::BufError; retrying (#{retries} retries left)"
    retries -= 1
    retry if retries > 0
  end
  File.binwrite(file, out)
end
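A usage sketch with hypothetical key and data. One observation from the code
itself: if all five retries fail, `out` is still the raw Marshal dump and is
written uncompressed; a later `get` then rescues `Zlib::GzipFile::Error` and
treats the entry as a miss.

  cache = Opal::Cache::FileCache.new(dir: '/tmp/opal-cache')
  cache.set('abc123', code: 'console.log(1)')
  # => writes gzipped Marshal data to /tmp/opal-cache/abc123.rbm.gz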
def tidy_up_cache
Remove cache entries that overflow our cache limit... and which were used least recently.
def tidy_up_cache
  entries = Dir[@dir + '/*.rbm.gz']
  entries_stats = entries.map { |entry| [entry, File.stat(entry)] }
  size_sum = entries_stats.map { |_entry, stat| stat.size }.sum
  return unless size_sum > @max_size

  # First, we try to get the oldest files first. Second, and more
  # importantly, we try to get the least recently used files first.
  # Filesystems with relatime or noatime get this wrong, but it doesn't
  # matter that much, because the previous sort got things "maybe right".
  entries_stats = entries_stats.sort_by { |_entry, stat| [stat.mtime, stat.atime] }

  entries_stats.each do |entry, stat|
    size_sum -= stat.size
    File.unlink(entry)

    # We don't need to work this out anymore - we reached our goal.
    break unless size_sum > @max_size
  end
rescue Errno::ENOENT
  # Do nothing, this comes from multithreading. We will tidy up at next chance.
end
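A sketch of the eviction behavior under a deliberately small, hypothetical cap;
incompressible data is used so the gzipped entries actually exceed it:

  require 'securerandom'
  cache = Opal::Cache::FileCache.new(dir: '/tmp/opal-cache', max_size: 1024)
  3.times { |i| cache.set("key#{i}", SecureRandom.random_bytes(2048)) }
  # The next instantiation over the same dir runs tidy_up_cache and unlinks
  # the stalest entries (oldest mtime, then atime) until the total size no
  # longer exceeds max_size.
  Opal::Cache::FileCache.new(dir: '/tmp/opal-cache', max_size: 1024)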