# frozen_string_literal: true

require "monitor"

module ActiveSupport
  module Cache
    # A cache store implementation which keeps all entries in memory within the
    # current process. When running several Ruby on Rails server processes
    # (as with Phusion Passenger or puma in clustered mode), each process holds
    # its own private copy of the cache, so processes cannot share cached data
    # with one another — which may make this store a poor fit for that setup.
    #
    # The cache is bounded in size via the +:size+ option passed to the
    # initializer (32 megabytes by default). Once the stored data grows past
    # that limit, a prune pass runs and evicts least-recently-used entries
    # until the cache is back down to roughly three quarters of the maximum.
    #
    # Unlike the other Cache store implementations, MemoryStore leaves values
    # uncompressed by default: since nothing is sent over a network, it gains
    # little from compression, yet enabling it would still cost full CPU time
    # to compress.
    #
    # MemoryStore is thread-safe.
    class MemoryStore < Store
      # Coder that defends against callers mutating cached objects in place:
      # values are duplicated on the way in and on the way out, so each reader
      # gets its own copy. Compressed entries are already private byte blobs
      # and need no defensive copy on dump.
      module DupCoder # :nodoc:
        extend self

        def dump(entry)
          entry.dup_value! unless entry.compressed?
          entry
        end

        def dump_compressed(entry, threshold)
          entry = entry.compressed(threshold)
          entry.dup_value! unless entry.compressed?
          entry
        end

        def load(entry)
          entry = entry.dup
          entry.dup_value!
          entry
        end
      end

      def initialize(options = nil)
        options ||= {}
        # Compression is off unless explicitly requested (see class docs).
        options[:compress] ||= false
        super(options)
        @data = {}
        @max_size = options[:size] || 32.megabytes
        @max_prune_time = options[:max_prune_time] || 2
        @cache_size = 0
        @monitor = Monitor.new
        @pruning = false
      end

      # Advertise cache versioning support.
      def self.supports_cache_versioning?
        true
      end

      # Delete all data stored in a given cache store.
      def clear(options = nil)
        synchronize do
          @data.clear
          @cache_size = 0
        end
      end

      # Preemptively iterates through all stored keys and removes the ones
      # which have expired.
      def cleanup(options = nil)
        options = merged_options(options)
        instrument(:cleanup, size: @data.size) do
          # Snapshot the key list under the lock; each delete then re-acquires
          # the lock individually so other threads are not starved.
          keys = synchronize { @data.keys }
          keys.each do |key|
            entry = @data[key]
            delete_entry(key, **options) if entry && entry.expired?
          end
        end
      end

      # To ensure entries fit within the specified memory prune the cache by
      # removing the least recently accessed entries.
      def prune(target_size, max_time = nil)
        return if pruning?
        @pruning = true
        begin
          start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
          cleanup
          instrument(:prune, target_size, from: @cache_size) do
            # @data is kept in recency order (read_entry re-inserts keys on
            # access), so iterating keys front-to-back visits LRU first.
            keys = synchronize { @data.keys }
            keys.each do |key|
              delete_entry(key, **options)
              return if @cache_size <= target_size || (max_time && Process.clock_gettime(Process::CLOCK_MONOTONIC) - start_time > max_time)
            end
          end
        ensure
          @pruning = false
        end
      end

      # Returns true if the cache is currently being pruned.
      def pruning?
        @pruning
      end

      # Increment an integer value in the cache.
      def increment(name, amount = 1, options = nil)
        modify_value(name, amount, options)
      end

      # Decrement an integer value in the cache.
      def decrement(name, amount = 1, options = nil)
        modify_value(name, -amount, options)
      end

      # Deletes cache entries if the cache key matches a given pattern.
      def delete_matched(matcher, options = nil)
        options = merged_options(options)
        instrument(:delete_matched, matcher.inspect) do
          matcher = key_matcher(matcher, options)
          keys = synchronize { @data.keys }
          keys.each do |key|
            delete_entry(key, **options) if key.match(matcher)
          end
        end
      end

      def inspect # :nodoc:
        "#<#{self.class.name} entries=#{@data.size}, size=#{@cache_size}, options=#{@options.inspect}>"
      end

      # Synchronize calls to the cache. This should be called wherever the
      # underlying cache implementation is not thread safe.
      def synchronize(&block) # :nodoc:
        @monitor.synchronize(&block)
      end

      private
        # Rough fixed accounting cost charged per entry on top of the key and
        # payload byte sizes.
        PER_ENTRY_OVERHEAD = 240

        def default_coder
          DupCoder
        end

        def cached_size(key, payload)
          key.to_s.bytesize + payload.bytesize + PER_ENTRY_OVERHEAD
        end

        def read_entry(key, **options)
          entry = nil
          synchronize do
            # Delete + re-insert moves the key to the back of the hash
            # (Ruby hashes preserve insertion order), marking it as the most
            # recently used so prune evicts it last.
            payload = @data.delete(key)
            if payload
              @data[key] = payload
              entry = deserialize_entry(payload)
            end
          end
          entry
        end

        def write_entry(key, entry, **options)
          payload = serialize_entry(entry, **options)
          synchronize do
            return false if options[:unless_exist] && @data.key?(key)
            old_payload = @data[key]
            if old_payload
              # Replacing an entry: adjust by the size delta only (the
              # per-entry overhead is unchanged).
              @cache_size -= (old_payload.bytesize - payload.bytesize)
            else
              @cache_size += cached_size(key, payload)
            end
            @data[key] = payload
            prune(@max_size * 0.75, @max_prune_time) if @cache_size > @max_size
            true
          end
        end

        def delete_entry(key, **options)
          synchronize do
            payload = @data.delete(key)
            @cache_size -= cached_size(key, payload) if payload
            !!payload
          end
        end

        def modify_value(name, amount, options)
          options = merged_options(options)
          synchronize do
            if num = read(name, options)
              num = num.to_i + amount
              write(name, num, options)
              num
            end
          end
        end
    end
  end
end