class Concurrent::RubyThreadPoolExecutor
# @!visibility private
# @!macro thread_pool_options
# @!macro thread_pool_executor
# Number of worker threads currently busy executing tasks.
#
# @return [Integer] total pool size minus the idle workers stashed in @ready
def active_count
  synchronize { @pool.length - @ready.length }
end
# Whether task submissions can be rejected, i.e. the queue is bounded.
#
# @return [true, false]
def can_overflow?
  synchronize do
    ns_limited_queue?
  end
end
# Number of tasks that have finished executing.
#
# @return [Integer]
def completed_task_count
  synchronize do
    @completed_task_count
  end
end
# Builds the executor, delegating all option handling to the superclass
# (which eventually calls +ns_initialize+ under the lock).
#
# @param [Hash] opts thread-pool options
def initialize(opts = {})
  super(opts)
end
# The largest number of threads the pool has ever held at once.
#
# @return [Integer]
def largest_length
  synchronize do
    @largest_length
  end
end
# Current number of threads in the pool (busy and idle alike).
#
# @return [Integer]
def length
  synchronize do
    @pool.length
  end
end
# Creates and registers a new busy worker, unless the pool is already at
# its maximum size.
#
# @return [nil, Worker] the new worker, or nil if max capacity is reached
#
# @!visibility private
def ns_add_busy_worker
  return nil if @pool.size >= @max_length

  @workers_counter += 1
  worker = Worker.new(self, @workers_counter)
  @pool << worker
  # Track the high-water mark of pool size.
  @largest_length = @pool.length if @pool.length > @largest_length
  worker
end
# Tries to hand the task directly to a worker, creating one if allowed.
#
# @return [true, false] true if the task was assigned to a worker
#
# @!visibility private
def ns_assign_worker(*args, &task)
  # Keep growing the pool while it is still below the minimum size;
  # otherwise reuse the most recently idled worker.
  candidate = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
  worker, _ = candidate
  if worker
    worker << [task, args]
    true
  else
    false
  end
rescue ThreadError
  # Raised when the operating system refuses to create the new thread.
  false
end
# Buffers the task in @queue when there is room.
#
# @return [true, false] true if enqueued
#
# @!visibility private
def ns_enqueue(*args, &task)
  # A synchronous executor never buffers tasks.
  return false if @synchronous

  if ns_limited_queue? && @queue.size >= @max_queue
    false
  else
    @queue << [task, args]
    true
  end
end
# Routes a submitted task: direct assignment first, then the queue, and
# finally the configured fallback policy when neither accepts it.
# Periodically prunes idle workers.
#
# @return [nil, Object] nil on acceptance, otherwise the fallback result
#
# @!visibility private
def ns_execute(*args, &task)
  ns_reset_if_forked

  unless ns_assign_worker(*args, &task) || ns_enqueue(*args, &task)
    # Neither assigned nor queued; let the fallback policy decide.
    return fallback_action(*args, &task)
  end

  @scheduled_task_count += 1
  ns_prune_pool if @next_gc_time < Concurrent.monotonic_time
  nil
end
# Initializes all executor state from the options hash; called under the
# lock by the superclass during construction.
#
# @param [Hash] opts pool-sizing, queue, and fallback options
# @raise [ArgumentError] when options are inconsistent or out of range;
#   note the validations run in a fixed order, so the first failing check
#   determines the message callers see
#
# @!visibility private
def ns_initialize(opts)
  # Sizing knobs fall back to the class-level defaults; .to_i coerces
  # string/float inputs.
  @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
  @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
  @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
  @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
  @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS)
  @fallback_policy = opts.fetch(:fallback_policy, :abort)

  # Validate option combinations before touching any other state.
  raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0
  raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy)
  raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE
  raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE
  raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE
  raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length

  @pool = [] # all workers
  @ready = [] # used as a stash (most idle worker is at the start)
  @queue = [] # used as queue
  # @ready or @queue is empty at all times
  @scheduled_task_count = 0
  @completed_task_count = 0
  @largest_length = 0
  @workers_counter = 0
  @ruby_pid = $$ # detects if Ruby has forked

  @gc_interval = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented
  @next_gc_time = Concurrent.monotonic_time + @gc_interval
end
# Kills every pooled thread immediately and forgets all workers.
#
# @!visibility private
def ns_kill_execution
  # TODO log out unprocessed tasks in queue
  # TODO try to shutdown first?
  @pool.each { |worker| worker.kill }
  @pool.clear
  @ready.clear
end
# Whether the task queue has a size limit (a @max_queue of 0 means
# unbounded).
#
# @return [true, false]
#
# @!visibility private
def ns_limited_queue?
  !@max_queue.zero?
end
# Stops workers that have been idle longer than +idletime+, keeping at
# least @min_length threads alive. The most idle worker sits at the head
# of @ready, so pruning can stop at the first recently-used one.
#
# @!visibility private
def ns_prune_pool
  now = Concurrent.monotonic_time
  stopped_workers = 0
  until @ready.empty? || @pool.size - stopped_workers <= @min_length
    worker, last_message = @ready.first
    break unless now - last_message > self.idletime

    stopped_workers += 1
    @ready.shift
    worker << :stop
  end
  # Schedule the next pruning pass.
  @next_gc_time = Concurrent.monotonic_time + @gc_interval
end
# Handles a worker that became ready: gives it the next queued job, parks
# it in @ready, or stops it when the executor is no longer running.
#
# @!visibility private
def ns_ready_worker(worker, last_message, success = true)
  if (job = @queue.shift)
    worker << job
  elsif running?
    # Workers are only parked while the executor is running.
    raise unless last_message
    @ready.push([worker, last_message])
  else
    # Shutting down: stop workers instead of returning them to @ready.
    worker.stop
  end
end
# Removes a busy worker (one not tracked in @ready) from the pool.
#
# @return [true]
#
# @!visibility private
def ns_remove_busy_worker(worker)
  @pool.delete(worker)
  # The last worker leaving a non-running pool completes the shutdown.
  if @pool.empty? && !running?
    stopped_event.set
  end
  true
end
# Resets all executor state after a fork. A forked child inherits the
# parent's bookkeeping but none of its threads, so everything is dropped.
#
# @!visibility private
def ns_reset_if_forked
  return if $$ == @ruby_pid

  @queue.clear
  @ready.clear
  @pool.clear
  @scheduled_task_count = 0
  @completed_task_count = 0
  @largest_length = 0
  @workers_counter = 0
  @ruby_pid = $$
end
# Begins an orderly shutdown: signals completion immediately when the pool
# is empty, and asks all workers to stop once no tasks remain queued.
#
# @!visibility private
def ns_shutdown_execution
  ns_reset_if_forked
  # Nothing to do when no workers exist.
  stopped_event.set if @pool.empty?
  # No more tasks will be accepted; with an empty queue, stop all workers.
  @pool.each(&:stop) if @queue.empty?
end
# Replaces a crashed worker with a fresh one when capacity allows, and
# parks the replacement as ready (marked as an unsuccessful completion).
#
# @!visibility private
def ns_worker_died(worker)
  ns_remove_busy_worker worker
  replacement = ns_add_busy_worker
  ns_ready_worker replacement, Concurrent.monotonic_time, false if replacement
end
# Triggers pruning of long-idle workers under the lock.
def prune_pool
  synchronize do
    ns_prune_pool
  end
end
# Number of tasks currently waiting in the queue.
#
# @return [Integer]
def queue_length
  synchronize do
    @queue.length
  end
end
# Thread-safe entry point used by workers reporting they are ready for a
# new job.
def ready_worker(worker, last_message)
  synchronize do
    ns_ready_worker worker, last_message
  end
end
# Free slots left in the queue, or -1 when the queue is unbounded.
#
# @return [Integer]
def remaining_capacity
  synchronize do
    ns_limited_queue? ? @max_queue - @queue.length : -1
  end
end
# Thread-safe removal of a busy worker from the pool.
def remove_busy_worker(worker)
  synchronize do
    ns_remove_busy_worker worker
  end
end
# Number of tasks that have been accepted for execution so far.
#
# @return [Integer]
def scheduled_task_count
  synchronize do
    @scheduled_task_count
  end
end
# Thread-safe entry point used by workers reporting an unexpected death.
def worker_died(worker)
  synchronize do
    ns_worker_died worker
  end
end
# Called by workers after finishing a task; bumps the completed counter.
def worker_task_completed
  synchronize do
    @completed_task_count += 1
  end
end