class ViewModel::ActiveRecord::Cache
Cache for ViewModels that wrap ActiveRecord models.
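A minimal usage sketch, assuming a hypothetical `PostView` viewmodel class that wraps an ActiveRecord `Post` model:

cache = ViewModel::ActiveRecord::Cache.new(PostView)

# One serialized view per requested id (duplicates preserved), plus the
# serializations of any views they reference.
views, references = cache.fetch([1, 2, 3])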
def cache_name
Statically version the terminal cache based on the deep schema versions of the constituent viewmodels, so that a schema change produces a new cache name rather than stale hits.
def cache_name "#{@viewmodel_class.name}_#{cache_version}" end
def cache_version
def cache_version
  version_string = @viewmodel_class.deep_schema_version(include_shared: false).to_a.sort.join(',')
  Base64.urlsafe_encode64(Digest::MD5.digest(version_string))
end
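To illustrate the versioning scheme, assume `deep_schema_version` returns a hash of view names to schema versions (the names and values below are made up); the digest then changes whenever any participating schema version does:

require 'digest'
require 'base64'

versions = { 'PostView' => 2, 'CommentView' => 1 }   # hypothetical deep schema versions

version_string = versions.to_a.sort.join(',')
# => "CommentView,1,PostView,2"

Base64.urlsafe_encode64(Digest::MD5.digest(version_string))
# => short URL-safe token embedded in cache_name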
def clear
def clear
  @cache_group.invalidate_cache_group
end
def create_default_cache_group
def create_default_cache_group
  IknowCache.register_group(@viewmodel_class.name, :id)
end
def delete(*ids)
def delete(*ids)
  ids.each do |id|
    @cache_group.delete_all(key_for(id))
  end
end
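Invalidation comes in two granularities; a sketch using the `cache` instance from the example above:

cache.delete(1, 2)   # drop the cached entries for ids 1 and 2
cache.clear          # invalidate the whole cache group for this viewmodel class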
def fetch(ids, initial_viewmodels: nil, locked: false, serialize_context: @viewmodel_class.new_serialize_context)
def fetch(ids, initial_viewmodels: nil, locked: false, serialize_context: @viewmodel_class.new_serialize_context)
  data_serializations = Array.new(ids.size)

  worker = CacheWorker.new(serialize_context: serialize_context)

  # If initial root viewmodels were provided, visit them to ensure that they
  # are visible. Other than this, no traversal callbacks are performed, as a
  # view may be resolved from the cache without ever loading its viewmodel.
  # Note that if unlocked, these views will be reloaded as part of obtaining a
  # share lock. If the visibility of this viewmodel can change due to edits,
  # it is necessary to obtain a lock before calling `fetch`.
  initial_viewmodels&.each do |v|
    serialize_context.run_callback(ViewModel::Callbacks::Hook::BeforeVisit, v)
    serialize_context.run_callback(ViewModel::Callbacks::Hook::AfterVisit, v)
  end

  # Collect input array positions for each id, allowing duplicates
  positions = ids.each_with_index.with_object({}) do |(id, i), h|
    (h[id] ||= []) << i
  end

  # Fetch duplicates only once
  ids = positions.keys

  # Load existing serializations from the cache
  cached_serializations = worker.load_from_cache(self, ids)
  cached_serializations.each do |id, data|
    positions[id].each do |idx|
      data_serializations[idx] = data
    end
  end

  # Resolve and serialize missing views
  missing_ids = ids.to_set.subtract(cached_serializations.keys)

  # If initial viewmodels have been locked, we can serialize them for cache
  # misses.
  available_viewmodels =
    if locked
      initial_viewmodels&.each_with_object({}) do |vm, h|
        h[vm.id] = vm if missing_ids.include?(vm.id)
      end
    end

  @viewmodel_class.transaction do
    # Load remaining views and serialize
    viewmodels = worker.find_and_preload_viewmodels(@viewmodel_class, missing_ids.to_a,
                                                    available_viewmodels: available_viewmodels)

    loaded_serializations = worker.serialize_and_cache(viewmodels)
    loaded_serializations.each do |id, data|
      positions[id].each do |idx|
        data_serializations[idx] = data
      end
    end

    # Resolve references
    worker.resolve_references!

    return data_serializations, worker.resolved_references
  end
end
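A sketch of the positional contract, with illustrative ids: duplicated inputs are fetched once but fill every position they occupied, and the second return value collects the serializations of referenced views.

views, references = cache.fetch([1, 1, 2])
views.size   # => 3; views[0] and views[1] are the same serialization
references   # => hash of reference keys to serialized shared views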
def fetch_by_viewmodel(viewmodels, locked: false, serialize_context: @viewmodel_class.new_serialize_context)
def fetch_by_viewmodel(viewmodels, locked: false, serialize_context: @viewmodel_class.new_serialize_context)
  ids = viewmodels.map(&:id)
  fetch(ids, initial_viewmodels: viewmodels, locked: locked, serialize_context: serialize_context)
end
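When the caller already holds loaded, share-locked root viewmodels, passing them with `locked: true` lets cache misses be served from those instances without a reload; a sketch assuming `locked_posts` is such an array:

views, references = cache.fetch_by_viewmodel(locked_posts, locked: true)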
def id_for(key)
def id_for(key)
  key[:id]
end
def initialize(viewmodel_class, cache_group: nil)
def initialize(viewmodel_class, cache_group: nil)
  @viewmodel_class = viewmodel_class
  @cache_group = cache_group || create_default_cache_group # requires @viewmodel_class
  @cache = @cache_group.register_cache(cache_name)
end
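A cache group may also be supplied explicitly instead of using the default; a sketch reusing the `IknowCache.register_group(name, :id)` call shown above in `create_default_cache_group` (the group name and `PostView` are illustrative):

group = IknowCache.register_group('custom_post_view_cache', :id)
cache = ViewModel::ActiveRecord::Cache.new(PostView, cache_group: group)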
def key_for(id)
def key_for(id)
  cache.key.new(id)
end
def load(ids, serialize_context:)
def load(ids, serialize_context:)
  keys = ids.map { |id| key_for(id) }
  results = cache.read_multi(keys)
  results.transform_keys! { |key| id_for(key) }
end
def store(id, data_serialization, ref_cache, serialize_context:)
def store(id, data_serialization, ref_cache, serialize_context:)
  cache.write(key_for(id), { data: data_serialization, ref_cache: ref_cache })
end
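A sketch of the `store`/`load` round trip with placeholder values (`data_json` and `refs` are hypothetical, and ids absent from the cache are assumed to be omitted from the result, depending on the underlying `read_multi`):

ctx = PostView.new_serialize_context   # PostView is a hypothetical viewmodel class
cache.store(1, data_json, refs, serialize_context: ctx)

cache.load([1, 2], serialize_context: ctx)
# => { 1 => { data: data_json, ref_cache: refs } }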