class LRUHash
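  # Minimal sketches of helpers referenced below but not shown in this listing:
  # NoopMutex is assumed to mirror the Mutex API this class uses while doing no
  # actual locking (for the non-thread-safe mode), and the two constants are
  # assumed to be Thread.handle_interrupt masks covering all exceptions.
  class NoopMutex
    def synchronize(*)
      yield
    end

    def try_lock
      true
    end

    def unlock
      true
    end

    def owned?
      true
    end
  end

  EXCEPTION_NEVER = { Exception => :never }.freeze
  EXCEPTION_IMMEDIATE = { Exception => :immediate }.freeze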
  def [](key)
    get(key)
  end
  def []=(key, resource)
    set(key, resource)
  end
  def clear
    @lock.synchronize { @table.clear }
  end
  def clear_unused_resources
    payload = {
      size: @table.size,
      examined: 0,
      cleared: 0,
      elapsed: nil,
    }
    timer_start = Process.clock_gettime(Process::CLOCK_MONOTONIC)

    ran = try_synchronize do
      # Clears resources that have not been used in the last 5 minutes
      # (the default value of Semian.minimum_lru_time).
      stop_time = Process.clock_gettime(Process::CLOCK_MONOTONIC) - @min_time
      @table.each do |_, resource|
        payload[:examined] += 1

        # The update times of the resources in the LRU are monotonically
        # increasing, so we can stop looking once we find the first resource
        # with an update time after the stop_time.
        break if resource.updated_at > stop_time

        next if resource.in_use?

        resource = @table.delete(resource.name)
        if resource
          payload[:cleared] += 1
          resource.destroy
        end
      end
    end

    if ran
      payload[:elapsed] = Process.clock_gettime(Process::CLOCK_MONOTONIC) - timer_start
      Semian.notify(:lru_hash_gc, self, nil, nil, payload)
    end
  end
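  # Usage sketch (assuming Semian.subscribe forwards the payload argument that
  # Semian.notify receives above): a subscriber could log the sweep statistics.
  #
  #   Semian.subscribe do |event, _resource, _scope, _adapter, payload|
  #     if event == :lru_hash_gc
  #       puts "LRU GC: examined=#{payload[:examined]} " \
  #            "cleared=#{payload[:cleared]} elapsed=#{payload[:elapsed]}s"
  #     end
  #   end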
  def count(&block)
    @lock.synchronize { @table.count(&block) }
  end
  def delete(key)
    @lock.synchronize do
      @table.delete(key)
    end
  end
  def empty?
    @lock.synchronize { @table.empty? }
  end
  # This method uses the property that "Hashes enumerate their values in the
  # order that the corresponding keys were inserted." Deleting a key and
  # re-inserting it effectively moves it to the most-recently-used position in
  # the cache. Update the `updated_at` field so we can use it later to decide
  # if the resource was recently used.
  def get(key)
    @lock.synchronize do
      found = @table.delete(key)
      if found
        @table[key] = found
        found.updated_at = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      end
      found
    end
  end
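  # Illustrative sketch (resource_a and resource_b are hypothetical objects
  # responding to +updated_at=+): getting a key re-inserts it, so it moves to
  # the end of the enumeration order and survives garbage collection longer.
  #
  #   cache = LRUHash.new(max_size: 2, min_time: 0)
  #   cache.set(:a, resource_a)
  #   cache.set(:b, resource_b)
  #   cache.get(:a)   # :a becomes the most recently used entry
  #   cache.keys      # => [:b, :a]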
  # Create an LRU hash
  #
  # Arguments:
  #   +max_size+ The maximum size of the table
  #   +min_time+ The minimum time in seconds a resource can live in the cache
  #
  # Note:
  #   The +min_time+ is a stronger guarantee than +max_size+. That is, if there are
  #   more than +max_size+ entries in the cache, but they've all been updated more
  #   recently than +min_time+, the garbage collection will not remove them and the
  #   cache can grow without bound. This usually means that you have many active
  #   circuits to disparate endpoints (or your circuit names are bad).
  #   If the max_size is 0, the garbage collection will be very aggressive and
  #   will run on every insertion.
  def initialize(max_size: Semian.maximum_lru_size, min_time: Semian.minimum_lru_time)
    @max_size = max_size
    @min_time = min_time
    @table = {}
    @lock =
      if Semian.thread_safe?
        ::Thread::Mutex.new
      else
        ::LRUHash::NoopMutex.new
      end
  end
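  # Usage sketch (the values here are illustrative; callers typically rely on
  # the Semian.maximum_lru_size and Semian.minimum_lru_time defaults):
  #
  #   cache = LRUHash.new(max_size: 500, min_time: 300)
  #   cache.set("mysql_primary", resource)  # resource must respond to updated_at=
  #   cache["mysql_primary"]                # same as cache.get("mysql_primary")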
  def keys
    @lock.synchronize { @table.keys }
  end
  def set(key, resource)
    @lock.synchronize do
      @table.delete(key)
      @table[key] = resource
      resource.updated_at = Process.clock_gettime(Process::CLOCK_MONOTONIC)
    end
    clear_unused_resources if @table.length > @max_size
  end
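  # The sweep in clear_unused_resources deletes entries via +resource.name+,
  # so the key passed here is expected to be the resource's name. The sweep
  # also runs outside the synchronize block above and acquires the lock with
  # try_lock, so concurrent writers skip garbage collection rather than block.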
  def size
    @lock.synchronize { @table.size }
  end
  def try_synchronize(&block)
    Thread.handle_interrupt(EXCEPTION_NEVER) do
      return false unless @lock.try_lock

      Thread.handle_interrupt(EXCEPTION_IMMEDIATE, &block)
      true
    ensure
      @lock.unlock if @lock.owned?
    end
  end
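  # Behaviour sketch (expensive_sweep is a hypothetical block): if the lock is
  # free, the block runs and true is returned; if another thread already holds
  # the lock, the block is skipped and false is returned, which is how an
  # in-progress sweep suppresses a concurrent one.
  #
  #   ran = try_synchronize { expensive_sweep }
  #   # ran == true  -> the sweep executed
  #   # ran == false -> another thread held the lock; nothing happened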
  def values
    @lock.synchronize { @table.values }
  end
end