class ActiveSupport::Cache::MemoryStore
A cache store implementation which stores everything into memory in the
same process. If you’re running multiple Ruby on Rails server processes
(which is the case if you’re using Phusion Passenger or puma clustered mode),
then this means that Rails server process instances won’t be able
to share cache data with each other, and this may not be the most
appropriate cache in that scenario.

This cache has a bounded size specified by the :size option to the
initializer (default is 32 megabytes). When the cache exceeds the allotted
size, a cleanup will occur which tries to prune the cache down to three
quarters of the maximum size by removing the least recently used entries.

Unlike other Cache store implementations, MemoryStore does not compress
values by default. MemoryStore does not benefit from compression as much
as other Store implementations, as it does not send data over a network.
However, when compression is enabled, it still pays the full cost of
compression in terms of CPU use.

MemoryStore is thread-safe.
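For illustration, the store can be instantiated directly, or selected as the Rails cache store; the 64-megabyte size and key names below are arbitrary examples, not defaults:

    require "active_support"
    require "active_support/core_ext/numeric/bytes"

    cache = ActiveSupport::Cache::MemoryStore.new(size: 64.megabytes)
    cache.write("greeting", "hello")
    cache.read("greeting") # => "hello"

    # In a Rails application the same store can be selected with:
    #   config.cache_store = :memory_store, { size: 64.megabytes }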
def self.supports_cache_versioning?
  true
end
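Because the store reports versioning support, reads and writes accept a :version option, and a mismatched version behaves like a miss. A small sketch (the key and values are invented for illustration):

    cache = ActiveSupport::Cache::MemoryStore.new
    cache.write("post/1", "draft body", version: 1)
    cache.read("post/1", version: 1) # => "draft body"
    cache.read("post/1", version: 2) # => nil, the version no longer matches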
# Estimates the memory footprint of an entry: key bytes plus serialized
# payload bytes plus a fixed per-entry bookkeeping overhead.
def cached_size(key, payload)
  key.to_s.bytesize + payload.bytesize + PER_ENTRY_OVERHEAD
end
# Preemptively iterates through all stored keys and removes the ones
# which have expired.
def cleanup(options = nil)
  options = merged_options(options)
  instrument(:cleanup, size: @data.size) do
    keys = synchronize { @data.keys }
    keys.each do |key|
      entry = @data[key]
      delete_entry(key, **options) if entry && entry.expired?
    end
  end
end
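A minimal sketch of cleanup in action, assuming ActiveSupport and its numeric time extensions are loaded:

    cache = ActiveSupport::Cache::MemoryStore.new
    cache.write("session", "abc123", expires_in: 1.second)
    sleep 2
    cache.cleanup # the expired "session" entry is physically removed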
# Delete all data stored in the cache and reset the size counter.
def clear(options = nil)
  synchronize do
    @data.clear
    @cache_size = 0
  end
end
# Decrement a cached integer value. Returns the updated value, or nil
# if the key is missing.
def decrement(name, amount = 1, options = nil)
  modify_value(name, -amount, options)
end
def default_coder
  DupCoder
end
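DupCoder duplicates values on their way in and out of the store, so callers never share mutable state with the cache. A quick sketch of the observable effect:

    cache = ActiveSupport::Cache::MemoryStore.new
    list = ["a"]
    cache.write("list", list)
    list << "b"        # mutate the original object after writing
    cache.read("list") # => ["a"], the cached copy was duplicated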
def delete_entry(key, **options)
  synchronize do
    payload = @data.delete(key)
    @cache_size -= cached_size(key, payload) if payload
    !!payload
  end
end
# Deletes cache entries whose key matches the matcher.
def delete_matched(matcher, options = nil)
  options = merged_options(options)
  instrument(:delete_matched, matcher.inspect) do
    matcher = key_matcher(matcher, options)
    keys = synchronize { @data.keys }
    keys.each do |key|
      delete_entry(key, **options) if key.match(matcher)
    end
  end
end
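For example, a Regexp matcher can sweep a whole family of keys (the key scheme below is invented for illustration):

    cache = ActiveSupport::Cache::MemoryStore.new
    cache.write("user/1/profile", "...")
    cache.write("user/2/profile", "...")
    cache.write("site/banner", "...")
    cache.delete_matched(/\Auser\//) # removes both user/* entries
    cache.read("site/banner")        # => "..."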
# Increment a cached integer value. Returns the updated value, or nil
# if the key is missing.
def increment(name, amount = 1, options = nil)
  modify_value(name, amount, options)
end
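Together with decrement, this supports simple in-process counters; note that both return nil when the key does not exist yet:

    cache = ActiveSupport::Cache::MemoryStore.new
    cache.write("visits", 10)
    cache.increment("visits")    # => 11
    cache.increment("visits", 5) # => 16
    cache.decrement("visits", 2) # => 14
    cache.increment("missing")   # => nil, no entry to modify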
def initialize(options = nil)
  options ||= {}
  # Disable compression by default.
  options[:compress] ||= false
  super(options)
  @data = {}
  @max_size = options[:size] || 32.megabytes
  @max_prune_time = options[:max_prune_time] || 2
  @cache_size = 0
  @monitor = Monitor.new
  @pruning = false
end
def inspect # :nodoc:
  "#<#{self.class.name} entries=#{@data.size}, size=#{@cache_size}, options=#{@options.inspect}>"
end
# Shared implementation for increment and decrement: read, adjust, and
# write back under the monitor so concurrent updates don't race.
def modify_value(name, amount, options)
  options = merged_options(options)
  synchronize do
    if num = read(name, options)
      num = num.to_i + amount
      write(name, num, options)
      num
    end
  end
end
To ensure entries fit within the specified memory, prune the cache by removing the least recently accessed entries.

def prune(target_size, max_time = nil)
  return if pruning?
  @pruning = true
  begin
    start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
    cleanup
    instrument(:prune, target_size, from: @cache_size) do
      keys = synchronize { @data.keys }
      keys.each do |key|
        delete_entry(key, **options)
        return if @cache_size <= target_size || (max_time && Process.clock_gettime(Process::CLOCK_MONOTONIC) - start_time > max_time)
      end
    end
  ensure
    @pruning = false
  end
end
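Pruning is triggered automatically by write_entry once the running size exceeds :size. A rough sketch of the resulting least-recently-used behavior; the exact set of evicted keys depends on per-entry overhead, so the reads below are only likely outcomes:

    cache = ActiveSupport::Cache::MemoryStore.new(size: 32.kilobytes)
    40.times { |i| cache.write("key#{i}", "x" * 1024) }
    # The writes exceeded 32KB, so the cache pruned itself back toward
    # three quarters of the cap, evicting the oldest keys first.
    cache.read("key0")  # => nil (most likely evicted)
    cache.read("key39") # => "xx..." (recently written, retained)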
# Returns true if the cache is currently being pruned.
def pruning?
  @pruning
end
def read_entry(key, **options)
  entry = nil
  synchronize do
    payload = @data.delete(key)
    if payload
      # Reinsert the payload so the key moves to the end of the
      # insertion-ordered Hash, marking it most recently used.
      @data[key] = payload
      entry = deserialize_entry(payload)
    end
  end
  entry
end
Synchronize calls to the cache. This should be called wherever the underlying cache implementation is not thread safe.

def synchronize(&block) # :nodoc:
  @monitor.synchronize(&block)
end
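Since every read and write passes through this monitor, concurrent access is safe; a small demonstration using increment, which performs its read-modify-write under the lock:

    cache = ActiveSupport::Cache::MemoryStore.new
    cache.write("hits", 0)
    threads = 4.times.map do
      Thread.new { 1000.times { cache.increment("hits") } }
    end
    threads.each(&:join)
    cache.read("hits") # => 4000, no updates were lost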
def write_entry(key, entry, **options)
  payload = serialize_entry(entry, **options)
  synchronize do
    return false if options[:unless_exist] && @data.key?(key)

    old_payload = @data[key]
    if old_payload
      # Overwriting: adjust the size total by the change in payload size.
      @cache_size -= (old_payload.bytesize - payload.bytesize)
    else
      @cache_size += cached_size(key, payload)
    end
    @data[key] = payload
    # Prune down to 75% of the cap whenever the cap is exceeded.
    prune(@max_size * 0.75, @max_prune_time) if @cache_size > @max_size
    true
  end
end