class Async::Scheduler
Handles scheduling of fibers. Implements the fiber scheduler interface.
def self.supported?
Whether the fiber scheduler is supported.
def self.supported?
	true
end
def address_resolve(hostname)
def address_resolve(hostname)
	# On some platforms, hostnames may contain a device-specific suffix (e.g. %en0). We need to strip this before resolving.
	# See <https://github.com/socketry/async/issues/180> for more details.
	hostname = hostname.split("%", 2).first
	::Resolv.getaddresses(hostname)
end
def async(*arguments, **options, &block)
@deprecated With no replacement.
def async(*arguments, **options, &block)
	Kernel.raise ClosedError if @selector.nil?
	
	task = Task.new(Task.current? || self, **options, &block)
	
	# I want to take a moment to explain the logic of this.
	# When calling an async block, we deterministically execute it until the
	# first blocking operation. We don't *have* to do this - we could schedule
	# it for later execution, but it's useful to:
	# - Fail at the point of the method call where possible.
	# - Execute deterministically where possible.
	# - Avoid scheduler overhead if no blocking operation is performed.
	task.run(*arguments)
	
	# Console.debug "Initial execution of task #{fiber} complete (#{result} -> #{fiber.alive?})..."
	return task
end
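For illustration, a small usage sketch (using the public `Async { }` entry point, which constructs tasks through this path) showing that a child task executes deterministically up to its first blocking operation before control returns to the caller:

require "async"

Async do
	child = Async do
		puts "child: started"   # Runs immediately, before the outer task resumes.
		sleep(0.01)             # First blocking operation: control returns to the parent.
		puts "child: finished"
	end
	
	puts "parent: child is now waiting"
	child.wait
end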
def block(blocker, timeout)
Invoked when a fiber tries to perform a blocking operation which cannot continue. A corresponding call to {unblock} must be performed to allow this fiber to continue.
def block(blocker, timeout)
	# $stderr.puts "block(#{blocker}, #{Fiber.current}, #{timeout})"
	fiber = Fiber.current
	
	if timeout
		timer = @timers.after(timeout) do
			if fiber.alive?
				fiber.transfer(false)
			end
		end
	end
	
	begin
		@blocked += 1
		@selector.transfer
	ensure
		@blocked -= 1
	end
ensure
	timer&.cancel!
end
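As an illustration of this contract, a minimal sketch of a notification object built on `block` and `unblock`. The `SimpleNotification` class is hypothetical (not part of this library), and assumes this scheduler is installed as the fiber scheduler for the current thread:

class SimpleNotification
	def initialize
		@waiting = []
	end
	
	def wait
		@waiting << Fiber.current
		# Suspend the current fiber; it will be resumed by a corresponding unblock:
		Fiber.scheduler.block(self, nil)
	end
	
	def signal
		while fiber = @waiting.shift
			# Schedule the blocked fiber to be resumed on the next iteration of the event loop:
			Fiber.scheduler.unblock(self, fiber)
		end
	end
end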
def close
Terminate all child tasks and close the scheduler.
def close
	self.run_loop do
		until self.terminate
			self.run_once!
		end
	end
	
	Kernel.raise "Closing scheduler with blocked operations!" if @blocked > 0
ensure
	# We want `@selector = nil` to be a visible side effect from this point forward, specifically in `#interrupt` and `#unblock`. If the selector is closed, then we don't want to push any fibers to it.
	selector = @selector
	@selector = nil
	
	selector&.close
	
	consume
end
def closed?
@returns [Boolean] Whether the scheduler has been closed.
def closed?
	@selector.nil?
end
def fiber(...)
def fiber(...)
	return async(...).fiber
end
def get_timeout(io)
def get_timeout(io)
	io.timeout
rescue NoMethodError
	# Not all IO-like objects support timeouts:
	nil
end
def initialize(parent = nil, selector: nil)
Create a new scheduler.
@public Since `stable-v1`.
@parameter parent [Node | Nil] The parent node to use for task hierarchy.
def initialize(parent = nil, selector: nil)
	super(parent)
	
	@selector = selector || ::IO::Event::Selector.new(Fiber.current)
	@interrupted = false
	
	@blocked = 0
	
	@busy_time = 0.0
	@idle_time = 0.0
	
	@timers = ::IO::Event::Timers.new
end
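A brief construction sketch: by default `::IO::Event::Selector.new` picks a platform-appropriate backend, but a specific selector from the `io-event` gem can be injected. The explicit `Select` backend below is just an illustrative choice:

require "async"
require "io/event"

# Use the portable select(2) backend instead of the auto-detected one:
scheduler = Async::Scheduler.new(selector: IO::Event::Selector::Select.new(Fiber.current))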
def interrupt
Interrupt the event loop and cause it to exit.
def interrupt
	@interrupted = true
	@selector&.wakeup
end
def interrupted?
Checks and clears the interrupted state of the scheduler.
def interrupted?
	if @interrupted
		@interrupted = false
		return true
	end
	
	if Thread.pending_interrupt?
		return true
	end
	
	return false
end
def io_read(io, buffer, length, offset = 0)
def io_read(io, buffer, length, offset = 0)
	fiber = Fiber.current
	
	if timeout = get_timeout(io)
		timer = @timers.after(timeout) do
			fiber.raise(::IO::TimeoutError, "Timeout while waiting for IO to become readable!")
		end
	end
	
	@selector.io_read(fiber, io, buffer, length, offset)
ensure
	timer&.cancel!
end
def io_wait(io, events, timeout = nil)
def io_wait(io, events, timeout = nil)
	fiber = Fiber.current
	
	if timeout
		# If an explicit timeout is specified, we expect that the user will handle it themselves:
		timer = @timers.after(timeout) do
			fiber.transfer
		end
	elsif timeout = get_timeout(io)
		# Otherwise, if we default to the io's timeout, we raise an exception:
		timer = @timers.after(timeout) do
			fiber.raise(::IO::TimeoutError, "Timeout while waiting for IO to become ready!")
		end
	end
	
	return @selector.io_wait(fiber, io, events)
ensure
	timer&.cancel!
end
def io_write(io, buffer, length, offset = 0)
def io_write(io, buffer, length, offset = 0)
	fiber = Fiber.current
	
	if timeout = get_timeout(io)
		timer = @timers.after(timeout) do
			fiber.raise(::IO::TimeoutError, "Timeout while waiting for IO to become writable!")
		end
	end
	
	@selector.io_write(fiber, io, buffer, length, offset)
ensure
	timer&.cancel!
end
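To illustrate how these hooks are reached, a hedged sketch (exact dispatch may vary with Ruby version): with the scheduler installed, blocking reads and writes on ordinary `IO` objects are serviced through the scheduler, and a per-IO timeout (`IO#timeout=`, Ruby 3.2+) is picked up via `get_timeout`:

require "async"

Async do
	input, output = IO.pipe
	input.timeout = 0.1
	
	begin
		input.read(1)   # Serviced via the scheduler's IO hooks; raises IO::TimeoutError after ~0.1s since nothing is written.
	rescue IO::TimeoutError
		puts "No data arrived in time."
	ensure
		input.close
		output.close
	end
end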
def kernel_sleep(duration = nil)
def kernel_sleep(duration = nil)
	if duration
		self.block(nil, duration)
	else
		self.transfer
	end
end
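Since `Kernel#sleep` is delegated here when the scheduler is installed, sleeping suspends only the current task. A quick sketch: two tasks sleeping concurrently take roughly as long as the longest sleep, not the sum:

require "async"

Async do |task|
	start = Async::Clock.now
	
	tasks = 2.times.map do
		task.async {sleep(0.1)}
	end
	
	tasks.each(&:wait)
	
	puts "Elapsed: #{Async::Clock.now - start}"   # Approximately 0.1, not 0.2.
end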
def load
Compute the scheduler load according to the busy and idle times that are updated by the run loop.
def load
	total_time = @busy_time + @idle_time
	
	# If the total time is zero, then the load is zero:
	return 0.0 if total_time.zero?
	
	# We normalize to a 1 second window:
	if total_time > 1.0
		ratio = 1.0 / total_time
		@busy_time *= ratio
		@idle_time *= ratio
		
		# We don't need to divide here as we've already normalized it to a 1s window:
		return @busy_time
	else
		return @busy_time / total_time
	end
end
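As a worked example: 0.25s of busy time against 0.75s of idle time yields a load of 0.25; once the accumulated window exceeds one second, both counters are scaled back so the value approximates a one-second moving window. A hedged usage sketch for sampling it from inside the reactor:

require "async"

Async do
	sleep(0.1)   # Mostly idle, so the load should be close to zero.
	puts "Scheduler load: #{Fiber.scheduler.load}"
end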
def process_wait(pid, flags)
Wait for the specified process ID to exit.
@parameter pid [Integer] The process ID to wait for.
@parameter flags [Integer] A bit-mask of flags suitable for `Process::Status.wait`.
@returns [Process::Status] A process status instance.
def process_wait(pid, flags)
	return @selector.process_wait(Fiber.current, pid, flags)
end
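A hedged usage sketch: with the scheduler installed, waiting for a child process suspends only the current task while the selector watches for the exit:

require "async"

Async do
	pid = Process.spawn("sleep", "0.1")
	status = Process::Status.wait(pid)   # Routed through process_wait; other tasks keep running.
	puts status.success?
end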
def push(fiber)
Schedule a fiber (or equivalent object) to be resumed on the next loop through the reactor.
def push(fiber)
	@selector.push(fiber)
end
def raise(...)
Raise an exception on a specified fiber with the given arguments.
This internally schedules the current fiber to be ready, before raising the exception, so that it will later resume execution.
@parameter fiber [Fiber] The fiber to raise the exception on.
def raise(...)
	@selector.raise(...)
end
def resume(fiber, *arguments)
Resume execution of the specified fiber.
@parameter fiber [Fiber] The fiber to resume.
def resume(fiber, *arguments)
	@selector.resume(fiber, *arguments)
end
def run(...)
def run(...)
	Kernel.raise ClosedError if @selector.nil?
	
	initial_task = self.async(...) if block_given?
	
	self.run_loop do
		run_once
	end
	
	return initial_task
end
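A hedged sketch of driving the scheduler directly; in normal use the `Async { }` entry point constructs, installs and runs the scheduler for you:

require "async"

scheduler = Async::Scheduler.new
Fiber.set_scheduler(scheduler)

scheduler.run do |task|
	task.async {puts "child"}   # Runs immediately, up to its first blocking operation.
	puts "parent"
end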
def run_loop(&block)
def run_loop(&block)
	interrupt = nil
	
	begin
		# In theory, we could use Exception here to be a little bit safer, but we've only shown the case for SignalException to be a problem, so let's not over-engineer this.
		Thread.handle_interrupt(::SignalException => :never) do
			until self.interrupted?
				# If we are finished, we need to exit:
				break unless yield
			end
		end
	rescue Interrupt => interrupt
		# If an interrupt occurred during an iteration of the event loop, stop all children and retry until termination completes:
		Thread.handle_interrupt(::SignalException => :never) do
			self.stop
		end
		
		retry
	end
	
	# If the event loop was interrupted, and we finished exiting normally (due to the interrupt), we need to re-raise the interrupt so that the caller can handle it too.
	Kernel.raise(interrupt) if interrupt
end
def run_once(timeout = nil)
Run one iteration of the event loop.
Does not handle interrupts.
@parameter timeout [Float | Nil] The maximum timeout, or if nil, indefinite.
def run_once(timeout = nil)
	Kernel.raise "Running scheduler on non-blocking fiber!" unless Fiber.blocking?
	
	if self.finished?
		self.stop
	end
	
	# If we are finished, we stop the task tree and exit:
	if @children.nil?
		return false
	end
	
	return run_once!(timeout)
end
def run_once!(timeout = nil)
Run one iteration of the event loop.
When terminating the event loop, we already know we are finished. So we don't need to check the task tree. This is a logical requirement because `run_once` ignores transient tasks. For example, a single top level transient task is not enough to keep the reactor running, but during termination we must still process it in order to terminate child tasks.
@parameter timeout [Float | Nil] The maximum timeout, or if nil, indefinite.
def run_once!(timeout = nil)
	start_time = Async::Clock.now
	
	interval = @timers.wait_interval
	
	# If there is no interval to wait (thus no timers), and no tasks, we could be done:
	if interval.nil?
		# Allow the user to specify a maximum interval if we would otherwise be sleeping indefinitely:
		interval = timeout
	elsif interval < 0
		# We have timers ready to fire, don't sleep in the selector:
		interval = 0
	elsif timeout and interval > timeout
		interval = timeout
	end
	
	begin
		@selector.select(interval)
	rescue Errno::EINTR
		# Ignore.
	end
	
	@timers.fire
	
	# Compute load:
	end_time = Async::Clock.now
	total_duration = end_time - start_time
	idle_duration = @selector.idle_duration
	busy_duration = total_duration - idle_duration
	
	@busy_time += busy_duration
	@idle_time += idle_duration
	
	# The reactor still has work to do:
	return true
end
def scheduler_close(error = $!)
Invoked when the fiber scheduler is being closed.
def scheduler_close(error = $!)
	# If the execution context (thread) was handling an exception, we want to exit as quickly as possible:
	unless error
		self.run
	end
ensure
	self.close
end
def stop
def stop
	@children&.each do |child|
		child.stop
	end
end
def terminate
def terminate
	# If that doesn't work, take more serious action:
	@children&.each do |child|
		child.terminate
	end
	
	return @children.nil?
end
def timeout_after(duration, exception, message, &block)
def timeout_after(duration, exception, message, &block)
	with_timeout(duration, exception, message) do |timer|
		yield duration
	end
end
def to_s
def to_s
	"\#<#{self.description} #{@children&.size || 0} children (#{stopped? ? 'stopped' : 'running'})>"
end
def transfer
def transfer
	@selector.transfer
end
def unblock(blocker, fiber)
def unblock(blocker, fiber)
	# $stderr.puts "unblock(#{blocker}, #{fiber})"
	
	# This operation is protected by the GVL:
	if selector = @selector
		selector.push(fiber)
		selector.wakeup
	end
end
def with_timeout(duration, exception = TimeoutError, message = "execution expired", &block)
Invoke the block, but after the specified timeout, raise {TimeoutError} in any currently blocking operation. If the block runs to completion before the timeout occurs, or there are no non-blocking operations after the timeout expires, the code will complete without any exception.
def with_timeout(duration, exception = TimeoutError, message = "execution expired", &block)
	fiber = Fiber.current
	
	timer = @timers.after(duration) do
		if fiber.alive?
			fiber.raise(exception, message)
		end
	end
	
	yield timer
ensure
	timer&.cancel!
end
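A usage sketch, assuming an Async reactor is running on the current thread:

require "async"

Async do
	Fiber.scheduler.with_timeout(0.1) do
		sleep(1)   # Interrupted by the timeout after ~0.1 seconds.
	end
rescue Async::TimeoutError
	puts "Timed out!"
end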
def yield
def yield
	@selector.yield
end